Skip to content

Commit 6842590

Browse files
committed
Added bucketing for latencies
1 parent 7537217 commit 6842590

File tree

4 files changed

+183
-144
lines changed

4 files changed

+183
-144
lines changed

splitio/api/telemetry.py

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -41,7 +41,7 @@ def record_unique_keys(self, uniques):
4141
if not 200 <= response.status_code < 300:
4242
raise APIException(response.body, response.status_code)
4343
except HttpClientException as exc:
44-
_LOGGER.error(
44+
_LOGGER.info(
4545
'Error posting unique keys because an exception was raised by the HTTPClient'
4646
)
4747
_LOGGER.debug('Error: ', exc_info=True)
@@ -65,7 +65,7 @@ def record_init(self, configs):
6565
if not 200 <= response.status_code < 300:
6666
raise APIException(response.body, response.status_code)
6767
except HttpClientException as exc:
68-
_LOGGER.error(
68+
_LOGGER.info(
6969
'Error posting init config because an exception was raised by the HTTPClient'
7070
)
7171
_LOGGER.debug('Error: ', exc_info=True)
@@ -89,7 +89,7 @@ def record_stats(self, stats):
8989
if not 200 <= response.status_code < 300:
9090
raise APIException(response.body, response.status_code)
9191
except HttpClientException as exc:
92-
_LOGGER.error(
92+
_LOGGER.info(
9393
'Error posting runtime stats because an exception was raised by the HTTPClient'
9494
)
9595
_LOGGER.debug('Error: ', exc_info=True)

splitio/models/telemetry.py

Lines changed: 28 additions & 38 deletions
Original file line number | Diff line number | Diff line change
@@ -113,37 +113,33 @@ def __init__(self):
113113
def _reset_all(self):
114114
"""Reset variables"""
115115
with self._lock:
116-
self._treatment = []
117-
self._treatments = []
118-
self._treatment_with_config = []
119-
self._treatments_with_config = []
120-
self._track = []
116+
self._treatment = [0] * 23
117+
self._treatments = [0] * 23
118+
self._treatment_with_config = [0] * 23
119+
self._treatments_with_config = [0] * 23
120+
self._track = [0] * 23
121121

122122
def add_latency(self, method, latency):
123123
"""
124124
Add Latency method
125125
126126
:param method: passed method name
127127
:type method: str
128-
:param latency: amount of latency
128+
:param latency: amount of latency in microseconds
129129
:type latency: int
130130
"""
131+
latency_bucket = get_latency_bucket_index(latency)
131132
with self._lock:
132133
if method == TREATMENT:
133-
if len(self._treatment) < MAX_LATENCY_BUCKET_COUNT:
134-
self._treatment.append(latency)
134+
self._treatment[latency_bucket] = self._treatment[latency_bucket] + 1
135135
elif method == TREATMENTS:
136-
if len(self._treatments) < MAX_LATENCY_BUCKET_COUNT:
137-
self._treatments.append(latency)
136+
self._treatments[latency_bucket] = self._treatments[latency_bucket] + 1
138137
elif method == TREATMENT_WITH_CONFIG:
139-
if len(self._treatment_with_config) < MAX_LATENCY_BUCKET_COUNT:
140-
self._treatment_with_config.append(latency)
138+
self._treatment_with_config[latency_bucket] = self._treatment_with_config[latency_bucket] + 1
141139
elif method == TREATMENTS_WITH_CONFIG:
142-
if len(self._treatments_with_config) < MAX_LATENCY_BUCKET_COUNT:
143-
self._treatments_with_config.append(latency)
140+
self._treatments_with_config[latency_bucket] = self._treatments_with_config[latency_bucket] + 1
144141
elif method == TRACK:
145-
if len(self._track) < MAX_LATENCY_BUCKET_COUNT:
146-
self._track.append(latency)
142+
self._track[latency_bucket] = self._track[latency_bucket] + 1
147143
else:
148144
return
149145

@@ -175,45 +171,39 @@ def __init__(self):
175171
def _reset_all(self):
176172
"""Reset variables"""
177173
with self._lock:
178-
self._split = []
179-
self._segment = []
180-
self._impression = []
181-
self._impression_count = []
182-
self._event =[]
183-
self._telemetry = []
184-
self._token = []
174+
self._split = [0] * 23
175+
self._segment = [0] * 23
176+
self._impression = [0] * 23
177+
self._impression_count = [0] * 23
178+
self._event = [0] * 23
179+
self._telemetry = [0] * 23
180+
self._token = [0] * 23
185181

186182
def add_latency(self, resource, latency):
187183
"""
188184
Add Latency method
189185
190186
:param resource: passed resource name
191187
:type resource: str
192-
:param latency: amount of latency
188+
:param latency: amount of latency in microseconds
193189
:type latency: int
194190
"""
191+
latency_bucket = get_latency_bucket_index(latency)
195192
with self._lock:
196193
if resource == SPLIT:
197-
if len(self._split) < MAX_LATENCY_BUCKET_COUNT:
198-
self._split.append(latency)
194+
self._split[latency_bucket] = self._split[latency_bucket] + 1
199195
elif resource == SEGMENT:
200-
if len(self._segment) < MAX_LATENCY_BUCKET_COUNT:
201-
self._segment.append(latency)
196+
self._segment[latency_bucket] = self._segment[latency_bucket] + 1
202197
elif resource == IMPRESSION:
203-
if len(self._impression) < MAX_LATENCY_BUCKET_COUNT:
204-
self._impression.append(latency)
198+
self._impression[latency_bucket] = self._impression[latency_bucket] + 1
205199
elif resource == IMPRESSION_COUNT:
206-
if len(self._impression_count) < MAX_LATENCY_BUCKET_COUNT:
207-
self._impression_count.append(latency)
200+
self._impression_count[latency_bucket] = self._impression_count[latency_bucket] + 1
208201
elif resource == EVENT:
209-
if len(self._event) < MAX_LATENCY_BUCKET_COUNT:
210-
self._event.append(latency)
202+
self._event[latency_bucket] = self._event[latency_bucket] + 1
211203
elif resource == TELEMETRY:
212-
if len(self._telemetry) < MAX_LATENCY_BUCKET_COUNT:
213-
self._telemetry.append(latency)
204+
self._telemetry[latency_bucket] = self._telemetry[latency_bucket] + 1
214205
elif resource == TOKEN:
215-
if len(self._token) < MAX_LATENCY_BUCKET_COUNT:
216-
self._token.append(latency)
206+
self._token[latency_bucket] = self._token[latency_bucket] + 1
217207
else:
218208
return
219209

tests/models/test_telemetry_model.py

Lines changed: 71 additions & 71 deletions
Original file line number | Diff line number | Diff line change
@@ -1,10 +1,13 @@
11
"""Telemetry model test module."""
22
import os
3+
import random
34

45
from splitio.models.telemetry import StorageType, OperationMode, MethodLatencies, MethodExceptions, \
56
HTTPLatencies, HTTPErrors, LastSynchronization, TelemetryCounters, TelemetryConfig, \
67
StreamingEvent, StreamingEvents, RefreshRates, URLOverrides
78

9+
import splitio.models.telemetry as ModelTelemetry
10+
811
class TelemetryModelTests(object):
912
"""Telemetry model test cases."""
1013

@@ -15,92 +18,71 @@ def test_storage_type_and_operation_mode(self, mocker):
1518
assert(OperationMode.MEMEORY == 'in-memory')
1619
assert(OperationMode.REDIS == 'redis-consumer')
1720

18-
def test_nethod_latencies(self, mocker):
21+
def test_method_latencies(self, mocker):
1922
method_latencies = MethodLatencies()
20-
method_latencies.add_latency('treatment', 10)
21-
assert(method_latencies._treatment == [10])
22-
[method_latencies.add_latency('treatment', 10) for i in range(25)]
23-
assert(len(method_latencies._treatment) == 23)
24-
25-
[method_latencies.add_latency('treatments', i) for i in [20, 30]]
26-
assert(method_latencies._treatments == [20, 30])
27-
[method_latencies.add_latency('treatments', 10) for i in range(25)]
28-
assert(len(method_latencies._treatments) == 23)
29-
30-
method_latencies.add_latency('treatmentWithConfig', 50)
31-
assert(method_latencies._treatment_with_config == [50])
32-
[method_latencies.add_latency('treatmentWithConfig', 10) for i in range(25)]
33-
assert(len(method_latencies._treatment_with_config) == 23)
3423

35-
method_latencies.add_latency('treatmentsWithConfig', 20)
36-
assert(method_latencies._treatments_with_config == [20])
37-
[method_latencies.add_latency('treatmentsWithConfig', 10) for i in range(25)]
38-
assert(len(method_latencies._treatments_with_config) == 23)
39-
40-
method_latencies.add_latency('track', 20)
41-
assert(method_latencies._track == [20])
42-
[method_latencies.add_latency('track', 10) for i in range(25)]
43-
assert(len(method_latencies._track) == 23)
24+
for method in ['treatment', 'treatments', 'treatmentWithConfig', 'treatmentsWithConfig', 'track']:
25+
method_latencies.add_latency(method, 50)
26+
assert(self._get_method_latency(method, method_latencies)[ModelTelemetry.get_latency_bucket_index(50)] == 1)
27+
method_latencies.add_latency(method, 50000000)
28+
assert(self._get_method_latency(method, method_latencies)[ModelTelemetry.get_latency_bucket_index(50000000)] == 1)
29+
for j in range(10):
30+
latency = random.randint(1001, 4987885)
31+
current_count = self._get_method_latency(method, method_latencies)[ModelTelemetry.get_latency_bucket_index(latency)]
32+
[method_latencies.add_latency(method, latency) for i in range(2)]
33+
assert(self._get_method_latency(method, method_latencies)[ModelTelemetry.get_latency_bucket_index(latency)] == 2 + current_count)
4434

4535
method_latencies.pop_all()
46-
assert(method_latencies._track == [])
47-
assert(method_latencies._treatment == [])
48-
assert(method_latencies._treatments == [])
49-
assert(method_latencies._treatment_with_config == [])
50-
assert(method_latencies._treatments_with_config == [])
36+
assert(method_latencies._track == [0] * 23)
37+
assert(method_latencies._treatment == [0] * 23)
38+
assert(method_latencies._treatments == [0] * 23)
39+
assert(method_latencies._treatment_with_config == [0] * 23)
40+
assert(method_latencies._treatments_with_config == [0] * 23)
5141

5242
method_latencies.add_latency('treatment', 10)
53-
method_latencies.add_latency('treatments', 20)
54-
method_latencies.add_latency('treatments', 30)
43+
[method_latencies.add_latency('treatments', 20) for i in range(2)]
5544
method_latencies.add_latency('treatmentWithConfig', 50)
5645
method_latencies.add_latency('treatmentsWithConfig', 20)
5746
method_latencies.add_latency('track', 20)
58-
method_latencies.add_latency('track', 60)
5947
latencies = method_latencies.pop_all()
60-
assert(latencies == {'methodLatencies': {'treatment': [10], 'treatments': [20, 30], 'treatmentWithConfig': [50], 'treatmentsWithConfig': [20], 'track': [20, 60]}})
48+
assert(latencies == {'methodLatencies': {'treatment': [1] + [0] * 22, 'treatments': [2] + [0] * 22, 'treatmentWithConfig': [1] + [0] * 22, 'treatmentsWithConfig': [1] + [0] * 22, 'track': [1] + [0] * 22}})
49+
50+
def _get_method_latency(self, resource, storage):
51+
if resource == ModelTelemetry.TREATMENT:
52+
return storage._treatment
53+
elif resource == ModelTelemetry.TREATMENTS:
54+
return storage._treatments
55+
elif resource == ModelTelemetry.TREATMENT_WITH_CONFIG:
56+
return storage._treatment_with_config
57+
elif resource == ModelTelemetry.TREATMENTS_WITH_CONFIG:
58+
return storage._treatments_with_config
59+
elif resource == ModelTelemetry.TRACK:
60+
return storage._track
61+
else:
62+
return
6163

6264
def test_http_latencies(self, mocker):
6365
http_latencies = HTTPLatencies()
6466

65-
http_latencies.add_latency('split', 10)
66-
assert(http_latencies._split == [10])
67-
[http_latencies.add_latency('split', 10) for i in range(25)]
68-
assert(len(http_latencies._split) == 23)
69-
70-
http_latencies.add_latency('segment', 10)
71-
assert(http_latencies._segment == [10])
72-
[http_latencies.add_latency('segment', 10) for i in range(25)]
73-
assert(len(http_latencies._segment) == 23)
74-
75-
http_latencies.add_latency('impression', 10)
76-
assert(http_latencies._impression == [10])
77-
[http_latencies.add_latency('impression', 10) for i in range(25)]
78-
assert(len(http_latencies._impression) == 23)
79-
80-
http_latencies.add_latency('impressionCount', 10)
81-
assert(http_latencies._impression_count == [10])
82-
[http_latencies.add_latency('impressionCount', 10) for i in range(25)]
83-
assert(len(http_latencies._impression_count) == 23)
84-
85-
http_latencies.add_latency('telemetry', 10)
86-
assert(http_latencies._telemetry == [10])
87-
[http_latencies.add_latency('telemetry', 10) for i in range(25)]
88-
assert(len(http_latencies._telemetry) == 23)
89-
90-
http_latencies.add_latency('token', 10)
91-
assert(http_latencies._token == [10])
92-
[http_latencies.add_latency('token', 10) for i in range(25)]
93-
assert(len(http_latencies._token) == 23)
67+
for resource in ['split', 'segment', 'impression', 'impressionCount', 'event', 'telemetry', 'token']:
68+
http_latencies.add_latency(resource, 50)
69+
assert(self._get_http_latency(resource, http_latencies)[ModelTelemetry.get_latency_bucket_index(50)] == 1)
70+
http_latencies.add_latency(resource, 50000000)
71+
assert(self._get_http_latency(resource, http_latencies)[ModelTelemetry.get_latency_bucket_index(50000000)] == 1)
72+
for j in range(10):
73+
latency = random.randint(1001, 4987885)
74+
current_count = self._get_http_latency(resource, http_latencies)[ModelTelemetry.get_latency_bucket_index(latency)]
75+
[http_latencies.add_latency(resource, latency) for i in range(2)]
76+
assert(self._get_http_latency(resource, http_latencies)[ModelTelemetry.get_latency_bucket_index(latency)] == 2 + current_count)
9477

9578
http_latencies.pop_all()
96-
assert(http_latencies._event == [])
97-
assert(http_latencies._impression == [])
98-
assert(http_latencies._impression_count == [])
99-
assert(http_latencies._segment == [])
100-
assert(http_latencies._split == [])
101-
assert(http_latencies._telemetry == [])
102-
assert(http_latencies._token == [])
103-
79+
assert(http_latencies._event == [0] * 23)
80+
assert(http_latencies._impression == [0] * 23)
81+
assert(http_latencies._impression_count == [0] * 23)
82+
assert(http_latencies._segment == [0] * 23)
83+
assert(http_latencies._split == [0] * 23)
84+
assert(http_latencies._telemetry == [0] * 23)
85+
assert(http_latencies._token == [0] * 23)
10486

10587
http_latencies.add_latency('split', 10)
10688
[http_latencies.add_latency('impression', i) for i in [10, 20]]
@@ -110,7 +92,25 @@ def test_http_latencies(self, mocker):
11092
http_latencies.add_latency('telemetry', 70)
11193
[http_latencies.add_latency('token', i) for i in [10, 15]]
11294
latencies = http_latencies.pop_all()
113-
assert(latencies == {'httpLatencies': {'split': [10], 'segment': [40], 'impression': [10, 20], 'impressionCount': [60], 'event': [90], 'telemetry': [70], 'token': [10, 15]}})
95+
assert(latencies == {'httpLatencies': {'split': [1] + [0] * 22, 'segment': [1] + [0] * 22, 'impression': [2] + [0] * 22, 'impressionCount': [1] + [0] * 22, 'event': [1] + [0] * 22, 'telemetry': [1] + [0] * 22, 'token': [2] + [0] * 22}})
96+
97+
def _get_http_latency(self, resource, storage):
98+
if resource == ModelTelemetry.SPLIT:
99+
return storage._split
100+
elif resource == ModelTelemetry.SEGMENT:
101+
return storage._segment
102+
elif resource == ModelTelemetry.IMPRESSION:
103+
return storage._impression
104+
elif resource == ModelTelemetry.IMPRESSION_COUNT:
105+
return storage._impression_count
106+
elif resource == ModelTelemetry.EVENT:
107+
return storage._event
108+
elif resource == ModelTelemetry.TELEMETRY:
109+
return storage._telemetry
110+
elif resource == ModelTelemetry.TOKEN:
111+
return storage._token
112+
else:
113+
return
114114

115115
def test_method_exceptions(self, mocker):
116116
method_exception = MethodExceptions()

0 commit comments

Comments (0)