17
17
from .. import TIMESERIES_DB
18
18
from .index import MetricIndex , Point , find_metric
19
19
from .queries import default_chart_query , math_map , operator_lookup
20
+ from .retention_policies import _make_policy , default_rp_policy
20
21
21
22
logger = logging .getLogger (__name__ )
22
23
@@ -70,7 +71,10 @@ def __init__(self, db_name='metric'):
70
71
def create_database(self):
    """ creates connection to elasticsearch """
    # register the default global connection used by elasticsearch_dsl
    connections.create_connection(hosts=[TIMESERIES_DB['HOST']])
    db = self.get_db
    # Skip if support for Index Lifecycle Management is disabled or no privileges
    # NOTE(review): ilm.start() raises if the user lacks ILM privileges —
    # presumably the deployment guarantees them; confirm against setup docs
    self.ilm_enabled = db.ilm.start()['acknowledged']
    # make sure a baseline retention policy exists (falls back to
    # default_rp_policy since no duration is passed)
    self.create_or_alter_retention_policy(name='default')
74
78
75
79
def drop_database (self ):
76
80
""" deletes all indices """
@@ -81,27 +85,48 @@ def drop_database(self):
81
85
@cached_property
def get_db(self):
    """ Returns an ``Elasticsearch Client`` instance """
    address = f"{TIMESERIES_DB['HOST']}:{TIMESERIES_DB['PORT']}"
    credentials = (TIMESERIES_DB['USER'], TIMESERIES_DB['PASSWORD'])
    return Elasticsearch([address], http_auth=credentials, retry_on_timeout=True)
90
93
91
def create_or_alter_retention_policy(self, name, duration=None):
    """
    creates or alters existing retention policy if necessary

    ``duration`` is expected in hours (e.g. ``'48h'``); when omitted the
    packaged ``default_rp_policy`` body is applied as-is.

    Note: default retention policy can't be altered with this function
    """
    # no-op when Index Lifecycle Management is unavailable on the cluster
    if not self.ilm_enabled:
        return
    ilm = self.get_db.ilm
    if not duration:
        ilm.put_lifecycle(policy=name, body=default_rp_policy)
        return
    # ES time units reject fractional values such as '1.0d',
    # so use integer division to build e.g. '2d' from '48h'
    days = f'{int(duration.split("h")[0]) // 24}d'
    duration_changed = False
    try:
        # look up the policy actually being created/altered
        # (was hardcoded 'default', which compared every policy
        # against the default one instead of its own definition)
        policy = ilm.get_lifecycle(name)
        exists = True
        current_duration = policy[name]['policy']['phases']['hot']['actions'][
            'rollover'
        ]['max_age']
        duration_changed = current_duration != days
    except NotFoundError:
        exists = False
    # write only when something actually needs to change
    if not exists or duration_changed:
        ilm.put_lifecycle(policy=name, body=_make_policy(days))
95
120
96
121
def query(self, query, precision=None):
    """
    Runs a raw elasticsearch query dict against the index named by its
    ``key`` entry and returns the response as a dict.

    The incoming dict is copied first so the caller's query object is
    not mutated by the ``pop`` below.
    """
    query = dict(query)
    index = query.pop('key')
    return Search(index=index).from_dict(query).execute().to_dict()
99
124
100
125
def write (self , name , values , ** kwargs ):
101
- # TODO: Add support for retention policy
126
+ rp = kwargs . get ( 'retention_policy' )
102
127
tags = kwargs .get ('tags' )
103
128
timestamp = kwargs .get ('timestamp' )
104
- metric_id = find_metric (name , tags , add = True )
129
+ metric_id = find_metric (self . get_db , name , tags , rp , add = True )
105
130
metric_index = MetricIndex ().get (metric_id , index = name )
106
131
point = Point (fields = values , time = timestamp or datetime .now ())
107
132
metric_index .points .append (point )
@@ -110,14 +135,12 @@ def write(self, name, values, **kwargs):
110
135
def read (self , key , fields , tags , limit = 1 , order = '-time' , ** kwargs ):
111
136
extra_fields = kwargs .get ('extra_fields' )
112
137
time_format = kwargs .get ('time_format' )
138
+ # TODO: It will be of the form 'now() - <int>s'
113
139
# since = kwargs.get('since')
114
- metric_id = find_metric (key , tags )
140
+ metric_id = find_metric (self . get_db , key , tags )
115
141
if not metric_id :
116
- return list ()
117
- try :
118
- metric_index = MetricIndex ().get (metric_id , index = key )
119
- except NotFoundError :
120
142
return []
143
+ metric_index = self .get_db .get (index = key , id = metric_id )
121
144
if order == 'time' :
122
145
points = list (metric_index .points [0 :limit ])
123
146
elif order == '-time' :
@@ -127,33 +150,28 @@ def read(self, key, fields, tags, limit=1, order='-time', **kwargs):
127
150
f'Invalid order "{ order } " passed.\n You may pass "time" / "-time" to get '
128
151
'result sorted in ascending /descending order respectively.'
129
152
)
130
- if not points :
131
- return list ()
132
153
# distinguish between traffic and clients
133
154
for point in list (points ):
134
155
if fields not in point .fields .to_dict ():
135
156
points .remove (point )
136
157
if extra_fields and extra_fields != '*' :
137
158
assert isinstance (extra_fields , list )
138
- _points = []
139
- for point in points :
140
- point = point .to_dict ()
141
- _point = {
159
+ for count , point in enumerate (points ):
160
+ fields_dict = point .to_dict ()['fields' ]
161
+ point = {
142
162
'time' : self ._format_time (point ['time' ], time_format ),
143
- fields : point [ 'fields' ] [fields ],
163
+ fields : fields_dict [fields ],
144
164
}
145
165
for extra_field in extra_fields :
146
- if point ['fields' ].get (extra_field ) is not None :
147
- _point .update ({extra_field : point ['fields' ][extra_field ]})
148
- _points .append (_point )
149
- points = _points
166
+ if fields_dict .get (extra_field ) is not None :
167
+ point .update ({extra_field : fields_dict [extra_field ]})
168
+ points [count ] = point
150
169
elif extra_fields == '*' :
151
- points = [
152
- deep_merge_dicts (
153
- p .fields .to_dict (), {'time' : self ._format_time (p .time , time_format )}
170
+ for count , point in enumerate (points ):
171
+ points [count ] = deep_merge_dicts (
172
+ point .fields .to_dict (),
173
+ {'time' : self ._format_time (point .time , time_format )},
154
174
)
155
- for p in points
156
- ]
157
175
else :
158
176
points = [
159
177
deep_merge_dicts (
@@ -210,12 +228,14 @@ def _fill_points(self, query, points):
210
228
211
229
def delete_metric_data(self, key=None, tags=None):
    """
    deletes a specific metric based on given key and tags;
    deletes the whole index when only ``key`` is given;
    deletes all metrics if neither provided
    """
    if key and tags:
        metric_id = find_metric(self.get_db, key, tags)
        # find_metric yields a falsy id when no such metric exists
        # (see read()); deleting with id=None would raise from the client
        if metric_id:
            self.get_db.delete(index=key, id=metric_id)
    elif key:
        self.get_db.indices.delete(index=key, ignore=[400, 404])
    else:
        self.get_db.indices.delete(index='*', ignore=[400, 404])
221
241
@@ -317,7 +337,7 @@ def default_chart_query(self, tags):
317
337
return q
318
338
319
339
320
- # Old data - delete by query (inefficient) / retention policy - Index lifecycle management
340
+ # TODO:
321
341
# Fix Average - currently it's computing average over all fields!
322
342
# Time Interval - fix range
323
343
# Device query
0 commit comments