@@ -17,6 +17,7 @@
 from .. import TIMESERIES_DB
 from .index import MetricIndex, Point, find_metric
 from .queries import default_chart_query, math_map, operator_lookup
+from .retention_policies import _make_policy, default_rp_policy

 logger = logging.getLogger(__name__)

@@ -70,7 +71,10 @@ def __init__(self, db_name='metric'):
     def create_database(self):
         """ creates connection to elasticsearch """
         connections.create_connection(hosts=[TIMESERIES_DB['HOST']])
-        self.get_db
+        db = self.get_db
+        # Skip if support for Index Lifecycle Management is disabled or no privileges
+        self.ilm_enabled = db.ilm.start()['acknowledged']
+        self.create_or_alter_retention_policy(name='default')

     def drop_database(self):
         """ deletes all indices """
@@ -81,27 +85,48 @@ def drop_database(self):
     @cached_property
     def get_db(self):
         """ Returns an ``Elasticsearch Client`` instance """
-        # TODO: AUTHENTICATION remains see `SecurityClient`
         return Elasticsearch(
             [f"{TIMESERIES_DB['HOST']}:{TIMESERIES_DB['PORT']}"],
             http_auth=(TIMESERIES_DB['USER'], TIMESERIES_DB['PASSWORD']),
             retry_on_timeout=True,
         )

-    def create_or_alter_retention_policy(self, name, duration):
-        """ creates or alters existing retention policy if necessary """
-        # TODO
-        pass
+    def create_or_alter_retention_policy(self, name, duration=None):
+        """
+        creates or alters existing retention policy if necessary
+
+        Note: default retention policy can't be altered with this function
+        """
+        if not self.ilm_enabled:
+            return
+        ilm = self.get_db.ilm
+        if not duration:
+            ilm.put_lifecycle(policy=name, body=default_rp_policy)
+            return
+        days = f'{int(duration.split("h")[0]) // 24}d'
+        duration_changed = False
+        try:
+            policy = ilm.get_lifecycle('default')
+            exists = True
+            current_duration = policy['default']['policy']['phases']['hot']['actions'][
+                'rollover'
+            ]['max_age']
+            duration_changed = current_duration != days
+        except NotFoundError:
+            exists = False
+        if not exists or duration_changed:
+            policy = _make_policy(days)
+            ilm.put_lifecycle(policy=name, body=policy)

     def query(self, query, precision=None):
         index = query.pop('key')
         return Search(index=index).from_dict(query).execute().to_dict()

     def write(self, name, values, **kwargs):
-        # TODO: Add support for retention policy
+        rp = kwargs.get('retention_policy')
         tags = kwargs.get('tags')
         timestamp = kwargs.get('timestamp')
-        metric_id = find_metric(name, tags, add=True)
+        metric_id = find_metric(self.get_db, name, tags, rp, add=True)
         metric_index = MetricIndex().get(metric_id, index=name)
         point = Point(fields=values, time=timestamp or datetime.now())
         metric_index.points.append(point)
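For context, a minimal sketch of the policy body that _make_policy(days) might build, inferred only from the nested keys the new create_or_alter_retention_policy() reads back (policy -> phases -> hot -> actions -> rollover -> max_age); the real helper lives in .retention_policies, which is not shown in this diff, and the delete phase below is an assumption:

def _make_policy(max_age):
    # hot phase: roll the index over once it is older than ``max_age``
    # (this is the path create_or_alter_retention_policy() reads back)
    return {
        'policy': {
            'phases': {
                'hot': {'actions': {'rollover': {'max_age': max_age}}},
                # assumption: rolled-over indices are eventually dropped
                'delete': {'min_age': max_age, 'actions': {'delete': {}}},
            }
        }
    }

With a body shaped like this, create_or_alter_retention_policy(name='short', duration='168h') converts '168h' to '7d' and uploads the policy through ilm.put_lifecycle().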
@@ -111,13 +136,10 @@ def read(self, key, fields, tags, limit=1, order='-time', **kwargs):
         extra_fields = kwargs.get('extra_fields')
         time_format = kwargs.get('time_format')
         # since = kwargs.get('since')
-        metric_id = find_metric(key, tags)
+        metric_id = find_metric(self.get_db, key, tags)
         if not metric_id:
-            return list()
-        try:
-            metric_index = MetricIndex().get(metric_id, index=key)
-        except NotFoundError:
             return []
+        metric_index = self.get_db.get(index=key, id=metric_id)
         if order == 'time':
             points = list(metric_index.points[0:limit])
         elif order == '-time':
@@ -127,33 +149,27 @@ def read(self, key, fields, tags, limit=1, order='-time', **kwargs):
                 f'Invalid order "{order}" passed.\nYou may pass "time" / "-time" to get '
                 'result sorted in ascending/descending order respectively.'
             )
-        if not points:
-            return list()
         # distinguish between traffic and clients
         for point in list(points):
             if fields not in point.fields.to_dict():
                 points.remove(point)
         if extra_fields and extra_fields != '*':
             assert isinstance(extra_fields, list)
-            _points = []
-            for point in points:
-                point = point.to_dict()
-                _point = {
+            for count, point in enumerate(points):
+                fields_dict = point.to_dict()['fields']
+                point = {
                     'time': self._format_time(point['time'], time_format),
-                    fields: point['fields'][fields],
+                    fields: fields_dict[fields],
                 }
                 for extra_field in extra_fields:
-                    if point['fields'].get(extra_field) is not None:
-                        _point.update({extra_field: point['fields'][extra_field]})
-                _points.append(_point)
-            points = _points
+                    if fields_dict.get(extra_field) is not None:
+                        point.update({extra_field: fields_dict[extra_field]})
+                points[count] = point
         elif extra_fields == '*':
-            points = [
-                deep_merge_dicts(
-                    p.fields.to_dict(), {'time': self._format_time(p.time, time_format)}
+            for count, point in enumerate(points):
+                points[count] = deep_merge_dicts(
+                    point.fields.to_dict(), {'time': self._format_time(point.time, time_format)}
                 )
-                for p in points
-            ]
         else:
             points = [
                 deep_merge_dicts(
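Purely illustrative: with the reworked formatting loops, a call such as read(key='ping', fields='loss', tags={}, extra_fields=['rtt_avg']) (hypothetical field names) would be expected to return a list shaped roughly like:

points = [
    {'time': '2020-06-18T10:00:00', 'loss': 0.0, 'rtt_avg': 2.3},
    {'time': '2020-06-18T09:00:00', 'loss': 5.0, 'rtt_avg': 4.1},
]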
@@ -210,12 +226,14 @@ def _fill_points(self, query, points):

     def delete_metric_data(self, key=None, tags=None):
         """
-        deletes a specific metric based on the key and tags
-        provided, you may also choose to delete all metrics
+        deletes a specific metric based on given key and tags;
+        deletes all metrics if neither provided
         """
         if key and tags:
-            metric_id = find_metric(key, tags)
+            metric_id = find_metric(self.get_db, key, tags)
             self.get_db.delete(index=key, id=metric_id)
+        elif key:
+            self.get_db.indices.delete(index=key, ignore=[400, 404])
         else:
             self.get_db.indices.delete(index='*', ignore=[400, 404])

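A hedged usage sketch of the three deletion paths (only the method signature comes from this diff; backend is assumed to be an instance of this Elasticsearch client class, and the key/tag values are made up):

backend.delete_metric_data(key='ping', tags={'object_id': '1'})  # delete one metric document
backend.delete_metric_data(key='ping')                           # delete the whole 'ping' index
backend.delete_metric_data()                                     # delete every index ('*')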
@@ -317,7 +335,7 @@ def default_chart_query(self, tags):
         return q


-# Old data - delete by query (inefficient) / retention policy - Index lifecycle management
+# TODO:
 # Fix Average - currently it's computing average over all fields!
 # Time Interval - fix range
 # Device query