Skip to content

Commit c589272

Browse files
author
mwatson
committed
Memory optimization for metrics for very high volume scenarios
1 parent c78b355 commit c589272

File tree

1 file changed

+17
-6
lines changed

1 file changed

+17
-6
lines changed

Src/StackifyLib/Internal/Metrics/MetricClient.cs

Lines changed: 17 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,11 @@ static MetricClient()
3131
_Timer = new Timer(UploadMetricsCheck, null, TimeSpan.FromSeconds(5), TimeSpan.FromSeconds(5));
3232
}
3333

34+
public static int QueueSize
35+
{
36+
get { return _MetricQueue.Count; }
37+
}
38+
3439
/// <summary>
3540
/// Used to make sure we report 0 values if nothing new comes in
3641
/// </summary>
@@ -197,17 +202,21 @@ private static void Aggregate(MetricAggregate aggregate)
197202
/// <summary>
198203
/// Read everything in the queue up to a certain time point
199204
/// </summary>
200-
private static void ReadQueuedMetricsBatch(DateTime maxDate)
205+
private static void ReadAllQueuedMetrics()
201206
{
207+
DateTime maxDate = DateTime.UtcNow; //read only up until now so it doesn't get stuck in an endless loop
202208
//Loop through and sum up the totals of the counts and values by aggregate key, then pass it all in at once to update the aggregate dictionary so it is done in one pass
203209

204210
//key is the aggregate key which is the metric name, type and rounded minute of the occurrence
205-
211+
206212
var batches = new Dictionary<string, MetricAggregate>();
207-
213+
214+
long processed = 0;
215+
208216
Metric metric;
209217
while (_MetricQueue.TryDequeue(out metric))
210218
{
219+
processed++;
211220
metric.CalcAndSetAggregateKey();
212221

213222
if (!batches.ContainsKey(metric.AggregateKey))
@@ -261,6 +270,8 @@ private static void ReadQueuedMetricsBatch(DateTime maxDate)
261270
}
262271
}
263272

273+
StackifyLib.Utils.StackifyAPILogger.Log(string.Format("Read queued metrics processed {0} for max date {1}", processed, maxDate));
274+
264275
foreach (var batch in batches)
265276
{
266277
Aggregate(batch.Value);
@@ -273,7 +284,7 @@ private static void UploadMetricsCheck(object state)
273284

274285
_Timer.Change(-1, -1);
275286

276-
double seconds = 15;
287+
double seconds = 2; //read quickly in case there is a very high volume to keep queue size down
277288

278289
if (!_StopRequested)
279290
{
@@ -317,8 +328,8 @@ public static bool UploadMetrics(DateTime currentMinute)
317328
List<KeyValuePair<string, MetricAggregate>> metrics = new List<KeyValuePair<string, MetricAggregate>>();
318329
try
319330
{
320-
//read everything up to the start of the current minute
321-
ReadQueuedMetricsBatch(currentMinute);
331+
//read everything up to now
332+
ReadAllQueuedMetrics();
322333

323334
//ensures all the aggregate keys exists for any previous metrics so we report zeros on no changes
324335
HandleZeroReports(currentMinute);

0 commit comments

Comments
 (0)