Compare commits

...

23 Commits

Author SHA1 Message Date
Naman Verma 1c1d069263 test: integration tests for order by sum (part 2) 2026-03-12 19:49:00 +05:30
Naman Verma 7dc46db2e3 test: split count and p90 group by into 2 tests 2026-03-12 19:39:51 +05:30
Naman Verma bfcd423a45 chore: remove logger used for debugging 2026-03-12 19:32:06 +05:30
Naman Verma 323b1163e5 test: integration tests for order by sum (part 1) 2026-03-12 19:31:07 +05:30
Naman Verma 673379a46c chore: separate CTE for histogram to be able to apply where clause for limit 2026-03-12 18:56:49 +05:30
Naman Verma 37f490c705 chore: lint issues 2026-03-12 14:35:27 +05:30
Naman Verma 324e34092e chore: rename vars 2026-03-12 12:20:49 +05:30
Naman Verma 2a2c365950 test: integration tests 2026-03-12 12:20:16 +05:30
Naman Verma 14065d39a6 Merge branch 'main' into nv/6204 2026-03-12 01:29:17 +05:30
Naman Verma bf2133f1ab test: fix meter unit tests 2026-03-10 22:53:39 +05:30
Naman Verma d7d907f687 test: max parametrising of the unit tests 2026-03-10 16:31:25 +05:30
Naman Verma 76b4549504 test: unit tests 2026-03-10 16:16:53 +05:30
Naman Verma 968a5089ff fix: check for tic when finding remaining keys 2026-03-10 16:16:29 +05:30
Naman Verma c082bc3d76 chore: also sort by remaining group by keys 2026-03-10 15:43:45 +05:30
Naman Verma 59e0dcc865 chore: move order by ts asc out of all if branches 2026-03-10 15:40:26 +05:30
Naman Verma 89840189ef chore: remove comment 2026-03-10 15:16:09 +05:30
Naman Verma b64a07db02 fix: limit in where subclause was coming as ? 2026-03-10 15:15:46 +05:30
Naman Verma 38d971b3c9 fix: add partition window when ordering by sum 2026-03-10 15:06:29 +05:30
Naman Verma f8b266ce05 chore: first draft before testing 2026-03-10 14:55:38 +05:30
Naman Verma 20f7562cbc revert: wrong changes in stmt builder (vv wrong) 2026-03-10 13:36:33 +05:30
Naman Verma 29713964ce Merge branch 'main' into nv/6204 2026-03-10 13:33:26 +05:30
Naman Verma afb252b4f9 Merge branch 'main' into nv/6204 2026-03-06 08:26:09 +05:30
Naman Verma c808b4d759 fix: consume order by and limit for metrics in clickhouse query 2026-03-05 09:29:23 +05:30
11 changed files with 1166 additions and 487 deletions

View File

@@ -69,6 +69,7 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
key string // deterministic join of label values
}
seriesMap := map[sKey]*qbtypes.TimeSeries{}
var keyOrder []sKey // preserves ClickHouse row-arrival order
stepMs := uint64(step.Duration.Milliseconds())
@@ -219,6 +220,7 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
if !ok {
series = &qbtypes.TimeSeries{Labels: lblObjs}
seriesMap[key] = series
keyOrder = append(keyOrder, key)
}
series.Values = append(series.Values, &qbtypes.TimeSeriesValue{
Timestamp: ts,
@@ -250,8 +252,8 @@ func readAsTimeSeries(rows driver.Rows, queryWindow *qbtypes.TimeRange, step qbt
Alias: "__result_" + strconv.Itoa(i),
}
}
for k, s := range seriesMap {
buckets[k.agg].Series = append(buckets[k.agg].Series, s)
for _, k := range keyOrder {
buckets[k.agg].Series = append(buckets[k.agg].Series, seriesMap[k])
}
var nonEmpty []*qbtypes.AggregationBucket
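A note on the keyOrder change above: Go randomizes map iteration order, so ranging over seriesMap directly would shuffle each bucket's series between runs even though ClickHouse emits rows in a stable ORDER BY. Recording each key once, on first sight, and ranging over that slice preserves row-arrival order. A minimal self-contained sketch of the pattern (names here are illustrative, not the real qbtypes types):

package main

import "fmt"

func main() {
	// Rows arrive in a stable order, as they do from ClickHouse.
	rows := []struct {
		key   string
		value float64
	}{{"svc-b", 1}, {"svc-a", 2}, {"svc-b", 3}}

	seriesMap := map[string][]float64{}
	var keyOrder []string // each key recorded once, on first sight

	for _, r := range rows {
		if _, ok := seriesMap[r.key]; !ok {
			keyOrder = append(keyOrder, r.key)
		}
		seriesMap[r.key] = append(seriesMap[r.key], r.value)
	}

	// Deterministic: svc-b then svc-a. Ranging over seriesMap is not.
	for _, k := range keyOrder {
		fmt.Println(k, seriesMap[k])
	}
}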

View File

@@ -185,22 +185,6 @@ func postProcessMetricQuery(
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
req *qbtypes.QueryRangeRequest,
) *qbtypes.Result {
config := query.Aggregations[0]
spaceAggOrderBy := fmt.Sprintf("%s(%s)", config.SpaceAggregation.StringValue(), config.MetricName)
timeAggOrderBy := fmt.Sprintf("%s(%s)", config.TimeAggregation.StringValue(), config.MetricName)
timeSpaceAggOrderBy := fmt.Sprintf("%s(%s(%s))", config.SpaceAggregation.StringValue(), config.TimeAggregation.StringValue(), config.MetricName)
for idx := range query.Order {
if query.Order[idx].Key.Name == spaceAggOrderBy ||
query.Order[idx].Key.Name == timeAggOrderBy ||
query.Order[idx].Key.Name == timeSpaceAggOrderBy {
query.Order[idx].Key.Name = qbtypes.DefaultOrderByKey
}
}
result = q.applySeriesLimit(result, query.Limit, query.Order)
if len(query.Functions) > 0 {
step := query.StepInterval.Duration.Milliseconds()
functions := q.prepareFillZeroArgsWithStep(query.Functions, req, step)
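This post-processing pass (rewriting aggregation-style order keys to the default key and applying the series limit in Go) is removed because the statement builder below now consumes order and limit inside the ClickHouse query itself. A hedged sketch of the distinction the builder relies on, mirroring its strings.Contains check; the metric and label names are illustrative:

package main

import (
	"fmt"
	"strings"
)

// orderClause mirrors the builder's rule: an order key that mentions the
// metric name refers to the aggregated value, so it is ordered by
// avg(value) per group; anything else is treated as a label column.
func orderClause(key, direction, metricName string, groupByKeys []string) string {
	if strings.Contains(key, metricName) {
		return fmt.Sprintf("avg(value) OVER (PARTITION BY %s) %s",
			strings.Join(groupByKeys, ", "), direction)
	}
	return fmt.Sprintf("`%s` %s", key, direction)
}

func main() {
	keys := []string{"`service.name`"}
	fmt.Println(orderClause("sum(signoz_calls_total)", "desc", "signoz_calls_total", keys))
	fmt.Println(orderClause("service.name", "asc", "signoz_calls_total", keys))
}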

View File

@@ -132,6 +132,14 @@ func GroupByKeys(keys []qbtypes.GroupByKey) []string {
return k
}
func OrderByKeys(keys []qbtypes.OrderBy) []string {
k := []string{}
for _, key := range keys {
k = append(k, "`"+key.Key.Name+"`")
}
return k
}
func FormatValueForContains(value any) string {
if value == nil {
return ""

View File

@@ -51,7 +51,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, max(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) LIMIT 10) ORDER BY `service.name`, ts ASC",
Args: []any{"signoz_calls_total", uint64(1747785600000), uint64(1747983420000), "cartservice", "cumulative", 0},
},
expectedErr: nil,
@@ -84,7 +84,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) LIMIT 10) ORDER BY `service.name`, ts ASC",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta"},
},
expectedErr: nil,
@@ -117,7 +117,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'service.name') AS `service.name`, sum(value)/86400 AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'service.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `service.name`, avg(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) LIMIT 10) ORDER BY `service.name`, ts ASC",
Args: []any{"signoz_calls_total", uint64(1747872000000), uint64(1747983420000), "cartservice", "delta", 0},
},
expectedErr: nil,
@@ -150,7 +150,7 @@ func TestStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(86400)) AS ts, JSONExtractString(labels, 'host.name') AS `host.name`, avg(value) AS per_series_value FROM signoz_meter.distributed_samples AS points WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? AND JSONExtractString(labels, 'host.name') = ? AND LOWER(temporality) LIKE LOWER(?) GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY avg(value) LIMIT 10) ORDER BY `host.name`, ts ASC",
Args: []any{"system.memory.usage", uint64(1747872000000), uint64(1747983420000), "big-data-node-1", "unspecified", 0},
},
expectedErr: nil,
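All four expectations change the same way: the final SELECT now restricts series through a label-selector subquery, WHERE (keys) IN (SELECT keys FROM cte GROUP BY keys ORDER BY avg(value) LIMIT n), instead of limiting in Go after the fact. A sketch of assembling that filter with the sqlbuilder package already visible in this diff (assuming the github.com/huandu/go-sqlbuilder import; CTE and key names are illustrative). Note the LIMIT is spliced in with Sprintf: routing it through the builder would emit a ? placeholder inside the IN subquery, the bug fixed in commit b64a07db02:

package main

import (
	"fmt"
	"strings"

	"github.com/huandu/go-sqlbuilder"
)

func main() {
	groupByKeys := []string{"`service.name`"}
	limit := 10

	sub := sqlbuilder.NewSelectBuilder()
	sub.Select(groupByKeys...)
	sub.From("__spatial_aggregation_cte")
	sub.GroupBy(groupByKeys...)
	sub.OrderBy("avg(value)")
	subSQL, _ := sub.BuildWithFlavor(sqlbuilder.ClickHouse)
	// LIMIT goes in as a literal, not through the builder, so it does not
	// surface as a "?" placeholder inside the IN subquery.
	subSQL = fmt.Sprintf("%s LIMIT %d", subSQL, limit)

	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("*")
	sb.From("__spatial_aggregation_cte")
	sb.Where(fmt.Sprintf("(%s) IN (%s)", strings.Join(groupByKeys, ", "), subSQL))
	sb.OrderBy("`service.name`", "ts ASC")

	q, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	fmt.Println(q)
}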

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"log/slog"
"strings"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/flagger"
@@ -546,6 +547,16 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
) (*qbtypes.Statement, error) {
metricType := query.Aggregations[0].Type
spaceAgg := query.Aggregations[0].SpaceAggregation
finalCTE := "__spatial_aggregation_cte"
if metricType == metrictypes.HistogramType {
histogramCTE, histogramCTEArgs, err := b.buildHistogramCTE(query)
if err != nil {
return nil, err
}
cteFragments = append(cteFragments, histogramCTE)
cteArgs = append(cteArgs, histogramCTEArgs)
finalCTE = "__histogram_cte"
}
combined := querybuilder.CombineCTEs(cteFragments)
@@ -555,60 +566,90 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
}
sb := sqlbuilder.NewSelectBuilder()
sb.Select("*")
sb.From(finalCTE)
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Where(rewrittenExpr)
}
if metricType == metrictypes.HistogramType && spaceAgg.IsPercentile() {
quantile := query.Aggregations[0].SpaceAggregation.Percentile()
sb.Select("ts")
for _, g := range query.GroupBy {
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
groupByKeys := querybuilder.GroupByKeys(query.GroupBy)
hasOrder := len(query.Order) > 0
hasLimit := query.Limit > 0
hasGroupBy := len(groupByKeys) > 0
if !hasGroupBy {
// no group-by keys: the result is a single series, so order and limit have no effect
} else if hasOrder && hasLimit {
labelSelectorSubQueryBuilder := sqlbuilder.NewSelectBuilder()
labelSelectorSubQueryBuilder.Select(groupByKeys...)
labelSelectorSubQueryBuilder.From(finalCTE)
labelSelectorOrderClauses := []string{}
for _, o := range query.Order {
key := o.Key.Name
var clause string
if strings.Contains(key, query.Aggregations[0].MetricName) {
clause = fmt.Sprintf("avg(value) %s", o.Direction.StringValue())
} else {
clause = fmt.Sprintf("`%s` %s", key, o.Direction.StringValue())
}
labelSelectorOrderClauses = append(labelSelectorOrderClauses, clause)
}
sb.SelectMore(fmt.Sprintf(
"histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) AS value",
quantile,
))
sb.From("__spatial_aggregation_cte")
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.GroupBy("ts")
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Having(rewrittenExpr)
labelSelectorSubQueryBuilder.GroupBy(groupByKeys...)
labelSelectorSubQueryBuilder.OrderBy(labelSelectorOrderClauses...)
labelSelectorSubQuery, _ := labelSelectorSubQueryBuilder.BuildWithFlavor(sqlbuilder.ClickHouse)
labelSelectorSubQuery = fmt.Sprintf("%s LIMIT %d", labelSelectorSubQuery, query.Limit)
sb.Where(fmt.Sprintf("(%s) IN (%s)", strings.Join(groupByKeys, ", "), labelSelectorSubQuery))
for _, o := range query.Order {
key := o.Key.Name
var clause string
if strings.Contains(key, query.Aggregations[0].MetricName) {
clause = fmt.Sprintf("avg(value) OVER (PARTITION BY %s) %s", strings.Join(groupByKeys, ", "), o.Direction.StringValue())
} else {
clause = fmt.Sprintf("`%s` %s", key, o.Direction.StringValue())
}
sb.OrderBy(clause)
}
} else if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam != nil {
sb.Select("ts")
for _, g := range query.GroupBy {
sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
} else if hasOrder {
// order by without limit: apply order by clauses directly
for _, o := range query.Order {
key := o.Key.Name
if strings.Contains(key, query.Aggregations[0].MetricName) {
sb.OrderBy(fmt.Sprintf("avg(value) OVER (PARTITION BY %s) %s", strings.Join(groupByKeys, ", "), o.Direction.StringValue()))
continue
}
sb.OrderBy(fmt.Sprintf("`%s` %s", o.Key.Name, o.Direction.StringValue()))
}
} else if hasLimit {
labelSelectorSubQueryBuilder := sqlbuilder.NewSelectBuilder()
labelSelectorSubQueryBuilder.Select(groupByKeys...).Distinct()
labelSelectorSubQueryBuilder.From(finalCTE)
labelSelectorSubQueryBuilder.GroupBy(groupByKeys...)
labelSelectorSubQueryBuilder.OrderBy("avg(value) DESC")
labelSelectorSubQuery, _ := labelSelectorSubQueryBuilder.BuildWithFlavor(sqlbuilder.ClickHouse)
labelSelectorSubQuery = fmt.Sprintf("%s LIMIT %d", labelSelectorSubQuery, query.Limit)
aggQuery, err := AggregationQueryForHistogramCountWithParams(query.Aggregations[0].ComparisonSpaceAggregationParam)
if err != nil {
return nil, err
}
sb.SelectMore(aggQuery)
sb.From("__spatial_aggregation_cte")
sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.GroupBy("ts")
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Having(rewrittenExpr)
}
sb.Where(fmt.Sprintf("(%s) IN (%s)", strings.Join(groupByKeys, ", "), labelSelectorSubQuery))
sb.OrderBy(fmt.Sprintf("avg(value) OVER (PARTITION BY %s) DESC", strings.Join(groupByKeys, ", ")))
} else {
// for count aggregation on histograms with no params, the exact result of spatial aggregation can be sent forward
sb.Select("*")
sb.From("__spatial_aggregation_cte")
if query.Having != nil && query.Having.Expression != "" {
rewriter := querybuilder.NewHavingExpressionRewriter()
rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
sb.Where(rewrittenExpr)
// grouping without order by or limit: sort by avg(value) DESC with labels as tiebreakers
sb.OrderBy(fmt.Sprintf("avg(value) OVER (PARTITION BY %s) DESC", strings.Join(groupByKeys, ", ")))
}
// add any group-by keys not already in the order-by as tiebreakers
orderKeySet := make(map[string]struct{})
for _, o := range query.Order {
orderKeySet[fmt.Sprintf("`%s`", o.Key.Name)] = struct{}{}
}
for _, g := range groupByKeys {
if _, exists := orderKeySet[g]; !exists {
sb.OrderBy(g)
}
}
sb.OrderBy(querybuilder.GroupByKeys(query.GroupBy)...)
sb.OrderBy("ts")
sb.OrderBy("ts ASC")
if metricType == metrictypes.HistogramType && spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam == nil {
sb.OrderBy("toFloat64(le)")
}
@@ -616,3 +657,45 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
q, a := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
return &qbtypes.Statement{Query: combined + q, Args: append(args, a...)}, nil
}
func (b *MetricQueryStatementBuilder) buildHistogramCTE(
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation],
) (string, []any, error) {
spaceAgg := query.Aggregations[0].SpaceAggregation
histogramCTEQueryBuilder := sqlbuilder.NewSelectBuilder()
if spaceAgg.IsPercentile() {
histogramCTEQueryBuilder.Select("ts")
for _, g := range query.GroupBy {
histogramCTEQueryBuilder.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
quantile := spaceAgg.Percentile()
histogramCTEQueryBuilder.SelectMore(fmt.Sprintf(
"histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), %.3f) AS value",
quantile,
))
histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
histogramCTEQueryBuilder.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
histogramCTEQueryBuilder.GroupBy("ts")
} else if spaceAgg == metrictypes.SpaceAggregationCount && query.Aggregations[0].ComparisonSpaceAggregationParam != nil {
histogramCTEQueryBuilder.Select("ts")
for _, g := range query.GroupBy {
histogramCTEQueryBuilder.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
}
aggQuery, err := AggregationQueryForHistogramCountWithParams(query.Aggregations[0].ComparisonSpaceAggregationParam)
if err != nil {
return "", nil, err
}
histogramCTEQueryBuilder.SelectMore(aggQuery)
histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
histogramCTEQueryBuilder.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
histogramCTEQueryBuilder.GroupBy("ts")
} else {
// for count aggregation on histograms with no params, the exact result of spatial aggregation can be sent forward
histogramCTEQueryBuilder.Select("*")
histogramCTEQueryBuilder.From("__spatial_aggregation_cte")
}
histogramQueryCTE, histogramQueryCTEArgs := histogramCTEQueryBuilder.BuildWithFlavor(sqlbuilder.ClickHouse)
histogramCTE := fmt.Sprintf("__histogram_cte AS (%s)", histogramQueryCTE)
return histogramCTE, histogramQueryCTEArgs, nil
}
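The net effect of buildHistogramCTE: percentile and parameterized-count histograms now get their own __histogram_cte, so the final SELECT is uniform across metric types and the series-limit WHERE ... IN filter has one place to attach (previously the histogramQuantile SELECT was built inline in BuildFinalSelect, leaving nowhere to hang that filter). A reduced sketch of how the fragments compose; combineCTEs here is an illustrative stand-in for querybuilder.CombineCTEs, and table/column names are made up:

package main

import (
	"fmt"
	"strings"
)

// combineCTEs joins CTE fragments into one WITH clause, in the spirit of
// querybuilder.CombineCTEs (illustrative, not the real implementation).
func combineCTEs(fragments []string) string {
	if len(fragments) == 0 {
		return ""
	}
	return "WITH " + strings.Join(fragments, ", ") + " "
}

func main() {
	cteFragments := []string{
		"__spatial_aggregation_cte AS (SELECT ts, endpoint, le, sum(value) AS value FROM samples GROUP BY ts, endpoint, le)",
		// buildHistogramCTE collapses the le buckets into one value per
		// (ts, endpoint), so the limit filter below treats histograms
		// like any other grouped metric.
		"__histogram_cte AS (SELECT ts, endpoint, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY ts, endpoint)",
	}
	finalCTE := "__histogram_cte"
	final := fmt.Sprintf(
		"SELECT * FROM %s WHERE (endpoint) IN (SELECT endpoint FROM %s GROUP BY endpoint ORDER BY avg(value) LIMIT 10) ORDER BY endpoint, ts ASC",
		finalCTE, finalCTE,
	)
	fmt.Println(combineCTEs(cteFragments) + final)
}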

View File

@@ -15,16 +15,17 @@ import (
)
func TestStatementBuilder(t *testing.T) {
cases := []struct {
name string
requestType qbtypes.RequestType
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
expected qbtypes.Statement
expectedErr error
}{
type baseQuery struct {
name string
query qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]
orderKey string
args []any
cte string
}
bases := []baseQuery{
{
name: "test_cumulative_rate_sum",
requestType: qbtypes.RequestTypeTimeSeries,
name: "cumulative_rate_sum",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -40,24 +41,16 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
orderKey: "service.name",
args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`)",
},
{
name: "test_cumulative_rate_sum_with_mat_column",
requestType: qbtypes.RequestTypeTimeSeries,
name: "cumulative_rate_sum_with_mat_column",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -73,24 +66,16 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "materialized.key.name REGEXP 'cartservice' OR service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
orderKey: "service.name",
args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "cartservice", "cartservice", "signoz_calls_total", uint64(1747947360000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND (match(JSONExtractString(labels, 'materialized.key.name'), ?) OR JSONExtractString(labels, 'service.name') = ?) GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`)",
},
{
name: "test_delta_rate_sum",
requestType: qbtypes.RequestTypeTimeSeries,
name: "delta_rate_sum",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -106,24 +91,16 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name`, ts",
Args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
orderKey: "service.name",
args: []any{"signoz_calls_total", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_calls_total", uint64(1747947390000), uint64(1747983420000)},
cte: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`)",
},
{
name: "test_histogram_percentile1",
requestType: qbtypes.RequestTypeTimeSeries,
name: "histogram_percentile1",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -139,24 +116,16 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "service.name = 'cartservice'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
expected: qbtypes.Statement{
Query: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
},
expectedErr: nil,
orderKey: "service.name",
args: []any{"signoz_latency", uint64(1747936800000), uint64(1747983420000), "delta", false, "cartservice", "signoz_latency", uint64(1747947390000), uint64(1747983420000)},
cte: "WITH __spatial_aggregation_cte AS (SELECT toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, sum(value)/30 AS value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'service.name') = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY ts, `service.name`, `le`)",
},
{
name: "test_gauge_avg_sum",
requestType: qbtypes.RequestTypeTimeSeries,
name: "gauge_avg_sum",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -172,24 +141,16 @@ func TestStatementBuilder(t *testing.T) {
Filter: &qbtypes.Filter{
Expression: "host.name = 'big-data-node-1'",
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "host.name",
},
},
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "host.name"}},
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`) SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name`, ts",
Args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
},
expectedErr: nil,
orderKey: "host.name",
args: []any{"system.memory.usage", uint64(1747936800000), uint64(1747983420000), "unspecified", false, "big-data-node-1", "system.memory.usage", uint64(1747947390000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `host.name`, avg(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'host.name') AS `host.name` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? AND JSONExtractString(labels, 'host.name') = ? GROUP BY fingerprint, `host.name`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `host.name` ORDER BY fingerprint, ts), __spatial_aggregation_cte AS (SELECT ts, `host.name`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `host.name`)",
},
{
name: "test_histogram_percentile2",
requestType: qbtypes.RequestTypeTimeSeries,
name: "histogram_percentile2",
query: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Signal: telemetrytypes.SignalMetrics,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
@@ -202,23 +163,69 @@ func TestStatementBuilder(t *testing.T) {
SpaceAggregation: metrictypes.SpaceAggregationPercentile95,
},
},
Limit: 10,
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "service.name",
},
},
{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
},
},
expected: qbtypes.Statement{
Query: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`) SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name`, ts",
Args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947360000), uint64(1747983420000), 0},
},
expectedErr: nil,
orderKey: "service.name",
args: []any{"http_server_duration_bucket", uint64(1747936800000), uint64(1747983420000), "cumulative", false, "http_server_duration_bucket", uint64(1747947360000), uint64(1747983420000), 0},
cte: "WITH __temporal_aggregation_cte AS (SELECT ts, `service.name`, `le`, multiIf(row_number() OVER rate_window = 1, nan, (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) < 0, per_series_value / (ts - lagInFrame(ts, 1) OVER rate_window), (per_series_value - lagInFrame(per_series_value, 1) OVER rate_window) / (ts - lagInFrame(ts, 1) OVER rate_window)) AS per_series_value FROM (SELECT fingerprint, toStartOfInterval(toDateTime(intDiv(unix_milli, 1000)), toIntervalSecond(30)) AS ts, `service.name`, `le`, max(value) AS per_series_value FROM signoz_metrics.distributed_samples_v4 AS points INNER JOIN (SELECT fingerprint, JSONExtractString(labels, 'service.name') AS `service.name`, JSONExtractString(labels, 'le') AS `le` FROM signoz_metrics.time_series_v4_6hrs WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli <= ? AND LOWER(temporality) LIKE LOWER(?) AND __normalized = ? GROUP BY fingerprint, `service.name`, `le`) AS filtered_time_series ON points.fingerprint = filtered_time_series.fingerprint WHERE metric_name IN (?) AND unix_milli >= ? AND unix_milli < ? GROUP BY fingerprint, ts, `service.name`, `le` ORDER BY fingerprint, ts) WINDOW rate_window AS (PARTITION BY fingerprint ORDER BY fingerprint, ts)), __spatial_aggregation_cte AS (SELECT ts, `service.name`, `le`, sum(per_series_value) AS value FROM __temporal_aggregation_cte WHERE isNaN(per_series_value) = ? GROUP BY ts, `service.name`, `le`)",
},
}
type variant struct {
name string
limit int
hasOrder bool
}
variants := []variant{
{"with_limits", 10, false},
{"without_limits", 0, false},
{"with_order_by", 0, true},
{"with_order_by_and_limits", 10, true},
}
// expectedFinalSelects maps "base/variant" to the final SELECT portion after the CTE.
// The full expected query is: base.cte + expectedFinalSelects[name]
expectedFinalSelects := map[string]string{
// cumulative_rate_sum
"cumulative_rate_sum/with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) LIMIT 10) ORDER BY `service.name`, ts ASC",
"cumulative_rate_sum/without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"cumulative_rate_sum/with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name` asc, ts ASC",
"cumulative_rate_sum/with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT DISTINCT `service.name` FROM __spatial_aggregation_cte ORDER BY `service.name` asc LIMIT 10) ORDER BY `service.name` asc, ts ASC",
// cumulative_rate_sum_with_mat_column
"cumulative_rate_sum_with_mat_column/with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) LIMIT 10) ORDER BY `service.name`, ts ASC",
"cumulative_rate_sum_with_mat_column/without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"cumulative_rate_sum_with_mat_column/with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name` asc, ts ASC",
"cumulative_rate_sum_with_mat_column/with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT DISTINCT `service.name` FROM __spatial_aggregation_cte ORDER BY `service.name` asc LIMIT 10) ORDER BY `service.name` asc, ts ASC",
// delta_rate_sum
"delta_rate_sum/with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) LIMIT 10) ORDER BY `service.name`, ts ASC",
"delta_rate_sum/without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"delta_rate_sum/with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `service.name` asc, ts ASC",
"delta_rate_sum/with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT DISTINCT `service.name` FROM __spatial_aggregation_cte ORDER BY `service.name` asc LIMIT 10) ORDER BY `service.name` asc, ts ASC",
// histogram_percentile1
"histogram_percentile1/with_limits": " SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) LIMIT 10) GROUP BY `service.name`, ts ORDER BY `service.name`, ts ASC",
"histogram_percentile1/without_limits": " SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"histogram_percentile1/with_order_by": " SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name` asc, ts ASC",
"histogram_percentile1/with_order_by_and_limits": " SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT DISTINCT `service.name` FROM __spatial_aggregation_cte ORDER BY `service.name` asc LIMIT 10) GROUP BY `service.name`, ts ORDER BY `service.name` asc, ts ASC",
// gauge_avg_sum
"gauge_avg_sum/with_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT `host.name` FROM __spatial_aggregation_cte GROUP BY `host.name` ORDER BY avg(value) LIMIT 10) ORDER BY `host.name`, ts ASC",
"gauge_avg_sum/without_limits": " SELECT * FROM __spatial_aggregation_cte ORDER BY avg(value) OVER (PARTITION BY `host.name`) DESC, `host.name`, ts ASC",
"gauge_avg_sum/with_order_by": " SELECT * FROM __spatial_aggregation_cte ORDER BY `host.name` asc, ts ASC",
"gauge_avg_sum/with_order_by_and_limits": " SELECT * FROM __spatial_aggregation_cte WHERE (`host.name`) IN (SELECT DISTINCT `host.name` FROM __spatial_aggregation_cte ORDER BY `host.name` asc LIMIT 10) ORDER BY `host.name` asc, ts ASC",
// histogram_percentile2
"histogram_percentile2/with_limits": " SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT `service.name` FROM __spatial_aggregation_cte GROUP BY `service.name` ORDER BY avg(value) LIMIT 10) GROUP BY `service.name`, ts ORDER BY `service.name`, ts ASC",
"histogram_percentile2/without_limits": " SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY avg(value) OVER (PARTITION BY `service.name`) DESC, `service.name`, ts ASC",
"histogram_percentile2/with_order_by": " SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte GROUP BY `service.name`, ts ORDER BY `service.name` asc, ts ASC",
"histogram_percentile2/with_order_by_and_limits": " SELECT ts, `service.name`, histogramQuantile(arrayMap(x -> toFloat64(x), groupArray(le)), groupArray(value), 0.950) AS value FROM __spatial_aggregation_cte WHERE (`service.name`) IN (SELECT DISTINCT `service.name` FROM __spatial_aggregation_cte ORDER BY `service.name` asc LIMIT 10) GROUP BY `service.name`, ts ORDER BY `service.name` asc, ts ASC",
}
fm := NewFieldMapper()
cb := NewConditionBuilder(fm)
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
@@ -227,15 +234,13 @@ func TestStatementBuilder(t *testing.T) {
t.Fatalf("failed to load field keys: %v", err)
}
mockMetadataStore.KeysMap = keys
// NOTE: LoadFieldKeysFromJSON doesn't set Materialized field
// for keys, so we have to set it manually here for testing
if _, ok := mockMetadataStore.KeysMap["materialized.key.name"]; ok {
if len(mockMetadataStore.KeysMap["materialized.key.name"]) > 0 {
mockMetadataStore.KeysMap["materialized.key.name"][0].Materialized = true
}
}
flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
fl, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
if err != nil {
t.Fatalf("failed to create flagger: %v", err)
}
@@ -245,23 +250,30 @@ func TestStatementBuilder(t *testing.T) {
mockMetadataStore,
fm,
cb,
flagger,
fl,
)
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
for _, b := range bases {
for _, v := range variants {
name := b.name + "/" + v.name
t.Run(name, func(t *testing.T) {
q := b.query
q.Limit = v.limit
if v.hasOrder {
q.Order = []qbtypes.OrderBy{
{
Key: qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: b.orderKey}},
Direction: qbtypes.OrderDirectionAsc,
},
}
}
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil)
result, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, qbtypes.RequestTypeTimeSeries, q, nil)
if c.expectedErr != nil {
require.Error(t, err)
require.Contains(t, err.Error(), c.expectedErr.Error())
} else {
require.NoError(t, err)
require.Equal(t, c.expected.Query, q.Query)
require.Equal(t, c.expected.Args, q.Args)
require.Equal(t, c.expected.Warnings, q.Warnings)
}
})
require.Equal(t, b.cte+expectedFinalSelects[name], result.Query)
require.Equal(t, b.args, result.Args)
})
}
}
}
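The test refactor above swaps a flat case list for a base-query × variant cross product: each base pins the CTE prefix and args, each variant toggles Limit and Order, and the expected final SELECT is looked up by the combined subtest name. The skeleton of that pattern, reduced to its essentials (data and names are illustrative):

package main

import "testing"

func TestBaseVariantCrossProduct(t *testing.T) {
	bases := []struct{ name, cte string }{
		{"delta_rate_sum", "WITH __spatial_aggregation_cte AS (/* ... */)"},
	}
	variants := []struct {
		name     string
		limit    int
		hasOrder bool
	}{
		{"with_limits", 10, false},
		{"without_limits", 0, false},
		{"with_order_by", 0, true},
		{"with_order_by_and_limits", 10, true},
	}
	// Expected final SELECTs keyed by "base/variant", as in the real test.
	expected := map[string]string{
		"delta_rate_sum/with_limits": " SELECT * FROM __spatial_aggregation_cte /* ... */",
	}
	for _, b := range bases {
		for _, v := range variants {
			name := b.name + "/" + v.name
			t.Run(name, func(t *testing.T) {
				// The real test copies b.query, sets Limit from v.limit and,
				// when v.hasOrder, one ascending OrderBy on the group-by key,
				// then asserts the built query equals b.cte + expected[name].
				if want, ok := expected[name]; ok {
					t.Log(b.cte + want)
				}
			})
		}
	}
}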

View File

@@ -58,6 +58,8 @@ def build_builder_query(
step_interval: int = DEFAULT_STEP_INTERVAL,
group_by: Optional[List[str]] = None,
filter_expression: Optional[str] = None,
order_by: Optional[List[Dict]] = None,
limit: Optional[int] = None,
functions: Optional[List[Dict]] = None,
disabled: bool = False,
) -> Dict:
@@ -93,6 +95,12 @@ def build_builder_query(
if filter_expression:
spec["filter"] = {"expression": filter_expression}
if order_by:
spec["order"] = order_by
if limit is not None:
spec["limit"] = limit
if functions:
spec["functions"] = functions

View File

@@ -2,16 +2,20 @@
Look at the cumulative_counters_1h.jsonl file for the relevant data
"""
import logging
import os
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Callable, List
from typing import Any, Callable, List, Optional, Union
import pytest
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -71,16 +75,200 @@ def test_rate_with_steady_values_and_reset(
assert v["value"] >= 0, f"Rate should not be negative: {v['value']}"
def _assert_endpoint_rate_values(endpoint_values: dict) -> None:
# /health: 60 data points (t01-t60), steady +10/min
# rate = 10/60 = 0.167
if "/health" in endpoint_values:
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
# rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
if "/products" in endpoint_values:
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
# most values should be 0.333, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values (0.333) for /products, got {count_steady_products}"
# check that non-0.333 values are due to gap averaging (should be lower)
gap_boundary_values = [v["value"] for v in products_values if v["value"] != 0.333]
for val in gap_boundary_values:
assert (
0 < val < 0.333
), f"Gap boundary values should be between 0 and 0.333, got {val}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
# normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
if "/checkout" in endpoint_values:
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# rate = 5/60 = 0.0833
# reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
if "/orders" in endpoint_values:
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high rate value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
# Rate = 1/60 = 0.0167 during increment, 0 during flat periods
if "/users" in endpoint_values:
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment rate values (0.0167) for /users, got {count_increment_rate}"
@pytest.mark.parametrize(
"metric_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,  # no order_by: equivalent to ordering by sum(metric_name) desc
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"only_limit",
None,  # default ordering is equivalent to sum(<metric>) desc
3,
3,
["/products", "/health", "/checkout"],
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
5,
["/checkout", "/health", "/orders", "/products", "/users"],
),
(
"asc_lim3",
[build_order_by("endpoint", "asc")],
3,
3,
["/checkout", "/health", "/orders"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
5,
["/users", "/products", "/orders", "/health", "/checkout"],
),
(
"desc_lim3",
[build_order_by("endpoint", "desc")],
3,
3,
["/users", "/products", "/orders"],
),
(
"asc_metric_name",
[build_order_by("sum(test_rate_groupby_asc_metric_name)", "asc")],
None,
5,
["/users", "/orders", "/checkout", "/health", "/products"],
),
(
"asc_metric_name_lim3",
[build_order_by("sum(test_rate_groupby_asc_metric_name_lim3)", "asc")],
3,
3,
["/users", "/orders", "/checkout"],
),
(
"desc_metric_name",
[build_order_by("sum(test_rate_groupby_desc_metric_name)", "desc")],
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"desc_metric_name_lim3",
[build_order_by("sum(test_rate_groupby_desc_metric_name_lim3)", "desc")],
3,
3,
["/products", "/health", "/checkout"],
),
],
)
def test_rate_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
metric_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = "test_rate_groupby"
metric_name = f"test_rate_groupby_{metric_suffix}"
metrics = Metrics.load_from_file(
CUMULATIVE_COUNTERS_FILE,
@@ -97,6 +285,8 @@ def test_rate_group_by_endpoint(
"sum",
temporality="cumulative",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -105,10 +295,23 @@ def test_rate_group_by_endpoint(
data = response.json()
all_series = get_all_series(data, "A")
# Series count should match the parametrized expectation
assert (
len(all_series) == expected_count
), f"Expected {expected_count} series, got {len(all_series)}"
endpoint_labels = [
series.get("labels", [{}])[0].get("value", "unknown")
for series in all_series
]
if isinstance(expected_endpoints, set):
assert (
set(endpoint_labels) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
else:
assert endpoint_labels == expected_endpoints, (
f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
)
# endpoint -> values
endpoint_values = {}
@@ -117,11 +320,6 @@ def test_rate_group_by_endpoint(
values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
endpoint_values[endpoint] = values
expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
assert (
set(endpoint_values.keys()) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"
# the rate should never be negative
for endpoint, values in endpoint_values.items():
for v in values:
@@ -129,103 +327,4 @@ def test_rate_group_by_endpoint(
v["value"] >= 0
), f"Rate for {endpoint} should not be negative: {v['value']}"
_assert_endpoint_rate_values(endpoint_values)
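# The expected orderings for the sum(metric) cases in the parametrization above
# can be reproduced by totalling each endpoint's counter growth over the hour
# and sorting on that total. The totals below are back-of-the-envelope numbers
# derived from the fixture comments, not values read from the fixture file:
fixture_totals = {
    "/users": 12,       # +1 every 5 minutes
    "/orders": 300,     # +5/min for ~60 minutes (ignoring the reset artefact)
    "/checkout": 306,   # +1/min plus the +50/min spike at t40-t44
    "/health": 600,     # +10/min for 60 minutes
    "/products": 1020,  # +20/min for ~51 minutes
}
assert sorted(fixture_totals, key=fixture_totals.get) == [
    "/users", "/orders", "/checkout", "/health", "/products"
]
assert sorted(fixture_totals, key=fixture_totals.get, reverse=True) == [
    "/products", "/health", "/checkout", "/orders", "/users"
]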

View File

@@ -5,7 +5,7 @@ Look at the multi_temporality_counters_1h.jsonl file for the relevant data
import random
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List, Optional, Union
import pytest
@@ -14,6 +14,7 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -91,6 +92,198 @@ def test_with_steady_values_and_reset(
), f"{time_aggregation} should not be negative: {v['value']}"
def _assert_endpoint_group_values( # pylint: disable=too-many-arguments
endpoint_values: dict,
stable_health_value: float,
stable_products_value: float,
stable_checkout_value: float,
spike_checkout_value: float,
stable_orders_value: float,
spike_users_value: float,
time_aggregation: str,
) -> None:
# /health: 60 data points (t01-t60), steady +10/min
if "/health" in endpoint_values:
health_values = endpoint_values["/health"]
assert (
len(health_values) >= 58
), f"Expected >= 58 values for /health, got {len(health_values)}"
count_steady_health = sum(
1 for v in health_values if v["value"] == stable_health_value
)
assert (
count_steady_health >= 57
), f"Expected >= 57 steady rate values ({stable_health_value}) for /health, got {count_steady_health}"
# all /health rates should be stable except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert (
v["value"] == stable_health_value
), f"Expected /health rate {stable_health_value}, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
if "/products" in endpoint_values:
products_values = endpoint_values["/products"]
assert (
len(products_values) >= 49
), f"Expected >= 49 values for /products, got {len(products_values)}"
count_steady_products = sum(
1 for v in products_values if v["value"] == stable_products_value
)
# most values should be stable, some boundary values differ due to 10-min gap
assert (
count_steady_products >= 46
), f"Expected >= 46 steady rate values ({stable_products_value}) for /products, got {count_steady_products}"
# check that non-stable values are due to gap averaging (should be lower)
gap_boundary_values = [
v["value"] for v in products_values if v["value"] != stable_products_value
]
for val in gap_boundary_values:
assert (
0 < val < stable_products_value
), f"Gap boundary values should be between 0 and {stable_products_value}, got {val}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
if "/checkout" in endpoint_values:
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) >= 59
), f"Expected >= 59 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(
1 for v in checkout_values if v["value"] == stable_checkout_value
)
assert (
count_steady_checkout >= 53
), f"Expected >= 53 steady {time_aggregation} values ({stable_checkout_value}) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(
1 for v in checkout_values if v["value"] == spike_checkout_value
)
assert (
count_spike_checkout >= 4
), f"Expected >= 4 spike {time_aggregation} values ({spike_checkout_value}) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == spike_checkout_value
]
assert len(spike_indices) >= 4, f"Expected >= 4 spike indices, got {spike_indices}"
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# reset at t31 causes: rate/increase at t30 includes gap (lower), t31 has high rate after reset
if "/orders" in endpoint_values:
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) >= 58
), f"Expected >= 58 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(
1 for v in orders_values if v["value"] == stable_orders_value
)
assert (
count_steady_orders >= 55
), f"Expected >= 55 steady {time_aggregation} values ({stable_orders_value}) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [
v["value"] for v in orders_values if v["value"] != stable_orders_value
]
assert (
len(non_standard_orders) >= 2
), f"Expected >= 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > stable_orders_value]
assert (
len(high_rate_orders) >= 1
), f"Expected at least one high {time_aggregation} value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes
if "/users" in endpoint_values:
users_values = endpoint_values["/users"]
assert (
len(users_values) >= 54
), f"Expected >= 54 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users >= 40
), f"Expected >= 40 zero {time_aggregation} values for /users (sparse data), got {count_zero_users}"
# non-zero values should be stable increment rate
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == spike_users_value)
assert (
count_increment_rate >= 8
), f"Expected >= 8 increment {time_aggregation} values ({spike_users_value}) for /users, got {count_increment_rate}"
@pytest.mark.parametrize(
"order_suffix,order_by_spec,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"asc",
("endpoint", "asc"),
None,
5,
["/checkout", "/health", "/orders", "/products", "/users"],
),
(
"asc_lim3",
("endpoint", "asc"),
3,
3,
["/checkout", "/health", "/orders"],
),
(
"desc",
("endpoint", "desc"),
None,
5,
["/users", "/products", "/orders", "/health", "/checkout"],
),
(
"desc_lim3",
("endpoint", "desc"),
3,
3,
["/users", "/products", "/orders"],
),
(
"asc_metric_name",
("sum_metric", "asc"),
None,
5,
["/users", "/orders", "/checkout", "/health", "/products"],
),
(
"asc_metric_name_lim3",
("sum_metric", "asc"),
3,
3,
["/users", "/orders", "/checkout"],
),
(
"desc_metric_name",
("sum_metric", "desc"),
None,
5,
["/products", "/health", "/checkout", "/orders", "/users"],
),
(
"desc_metric_name_lim3",
("sum_metric", "desc"),
3,
3,
["/products", "/health", "/checkout"],
),
],
)
@pytest.mark.parametrize(
"time_aggregation, stable_health_value, stable_products_value, stable_checkout_value, spike_checkout_value, stable_orders_value, spike_users_value",
[
@@ -110,11 +303,24 @@ def test_group_by_endpoint(
spike_checkout_value: float,
stable_orders_value: float,
spike_users_value: float,
order_suffix: str,
order_by_spec: Optional[tuple],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_{time_aggregation}_groupby"
metric_name = f"test_{time_aggregation}_groupby_{order_suffix}"
# Build order_by at runtime so metric name reflects actual time_aggregation
order_by = None
if order_by_spec is not None:
key, direction = order_by_spec
if key == "sum_metric":
key = f"sum({metric_name})"
order_by = [build_order_by(key, direction)]
metrics = Metrics.load_from_file(
MULTI_TEMPORALITY_FILE,
@@ -130,6 +336,8 @@ def test_group_by_endpoint(
time_aggregation,
"sum",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -137,10 +345,23 @@ def test_group_by_endpoint(
data = response.json()
all_series = get_all_series(data, "A")
# Series count should match the parametrized expectation
assert (
len(all_series) == expected_count
), f"Expected {expected_count} series, got {len(all_series)}"
endpoint_labels = [
series.get("labels", [{}])[0].get("value", "unknown")
for series in all_series
]
if isinstance(expected_endpoints, set):
assert (
set(endpoint_labels) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
else:
assert endpoint_labels == expected_endpoints, (
f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
)
# endpoint -> values
endpoint_values = {}
@@ -149,11 +370,6 @@ def test_group_by_endpoint(
values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
endpoint_values[endpoint] = values
expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
assert (
set(endpoint_values.keys()) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"
# the rate should never be negative
for endpoint, values in endpoint_values.items():
for v in values:
@@ -161,117 +377,16 @@ def test_group_by_endpoint(
v["value"] >= 0
), f"Rate for {endpoint} should not be negative: {v['value']}"
_assert_endpoint_group_values(
endpoint_values,
stable_health_value,
stable_products_value,
stable_checkout_value,
spike_checkout_value,
stable_orders_value,
spike_users_value,
time_aggregation,
)
@pytest.mark.parametrize(

View File

@@ -4,7 +4,7 @@ Look at the histogram_data_1h.jsonl file for the relevant data
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List, Optional, Union
import pytest
@@ -13,6 +13,7 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -521,4 +522,311 @@ def test_histogram_percentile_for_delta_service(
assert len(result_values) == 60
assert result_values[0]["value"] == zeroth_value
assert result_values[1]["value"] == first_value
assert result_values[-1]["value"] == last_value
assert result_values[-1]["value"] == last_value
def _assert_series_endpoint_labels(
series: list,
expected_endpoints: Union[set, List[str]],
prefix: str,
) -> None:
labels = [s.get("labels", [{}])[0].get("value", "unknown") for s in series]
if isinstance(expected_endpoints, set):
assert (
set(labels) == expected_endpoints
), f"Expected {prefix} endpoints {expected_endpoints}, got {set(labels)}"
else:
assert labels == expected_endpoints, (
f"Expected {prefix} endpoints in order {expected_endpoints}, got {labels}"
)
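# A quick usage sketch of the helper: a set expectation only checks membership
# (useful where ordering is not guaranteed), while a list expectation checks
# the exact order produced by order_by. The series dicts here are minimal
# stand-ins, not real querier responses.
_demo_series = [{"labels": [{"value": "/a"}]}, {"labels": [{"value": "/b"}]}]
_assert_series_endpoint_labels(_demo_series, {"/b", "/a"}, "demo")  # unordered
_assert_series_endpoint_labels(_demo_series, ["/a", "/b"], "demo")  # ordered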
@pytest.mark.parametrize(
"order_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
3,
["/checkout", "/health", "/orders"],
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
3,
["/checkout", "/health", "/orders"],
),
(
"asc_lim2",
[build_order_by("endpoint", "asc")],
2,
2,
["/checkout", "/health"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
3,
["/orders", "/health", "/checkout"],
),
(
"desc_lim2",
[build_order_by("endpoint", "desc")],
2,
2,
["/orders", "/health"],
),
(
"asc_metric_name",
[build_order_by("count(test_histogram_count_groupby_asc_metric_name)", "asc")],
None,
3,
["/health", "/orders", "/checkout"], ## health and orders have the same size so they are then sorted endpoint as a tiebreaker
),
(
"asc_metric_name_lim2",
[build_order_by("count(test_histogram_count_groupby_asc_metric_name_lim2)", "asc")],
2,
2,
["/health", "/orders"],
),
(
"desc_metric_name",
[build_order_by("count(test_histogram_count_groupby_desc_metric_name)", "desc")],
None,
3,
["/checkout", "/health", "/orders"], ## health and orders have the same size so they are then sorted endpoint as a tiebreaker
),
(
"desc_metric_name_lim2",
[build_order_by("count(test_histogram_count_groupby_desc_metric_name_lim2)", "desc")],
2,
2,
["/checkout", "/health"],
),
],
)
def test_histogram_count_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
order_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_histogram_count_groupby_{order_suffix}"
metrics = Metrics.load_from_file(
FILE,
base_time=now - timedelta(minutes=60),
metric_name_override=metric_name,
)
insert_metrics(metrics)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
query_count = build_builder_query(
"A",
metric_name,
"increase",
"count",
comparisonSpaceAggregationParam={"threshold": 1000, "operator": "<="},
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query_count])
assert response.status_code == HTTPStatus.OK
data = response.json()
count_all_series = get_all_series(data, "A")
assert (
len(count_all_series) == expected_count
), f"Expected {expected_count} series, got {len(count_all_series)}"
_assert_series_endpoint_labels(count_all_series, expected_endpoints, "count")
count_values = {}
for series in count_all_series:
endpoint = series.get("labels", [{}])[0].get("value", "unknown")
count_values[endpoint] = sorted(
series.get("values", []), key=lambda x: x["timestamp"]
)
for endpoint, values in count_values.items():
for v in values:
assert v["value"] >= 0, f"Count for {endpoint} should not be negative: {v['value']}"
# /health (cumulative, service=api): 59 points, increase starts at 11/min → 69/min
if "/health" in count_values:
vals = count_values["/health"]
assert vals[0]["value"] == 11, f"Expected /health count first=11, got {vals[0]['value']}"
assert vals[-1]["value"] == 69, f"Expected /health count last=69, got {vals[-1]['value']}"
# /orders (cumulative, service=api): same distribution as /health
if "/orders" in count_values:
vals = count_values["/orders"]
assert vals[0]["value"] == 11, f"Expected /orders count first=11, got {vals[0]['value']}"
assert vals[-1]["value"] == 69, f"Expected /orders count last=69, got {vals[-1]['value']}"
# /checkout (delta, service=web): 60 points, zeroth=12345 (raw delta), then 11/min → 69/min
if "/checkout" in count_values:
vals = count_values["/checkout"]
assert vals[0]["value"] == 12345, f"Expected /checkout count zeroth=12345, got {vals[0]['value']}"
assert vals[1]["value"] == 11, f"Expected /checkout count first=11, got {vals[1]['value']}"
assert vals[-1]["value"] == 69, f"Expected /checkout count last=69, got {vals[-1]['value']}"
@pytest.mark.parametrize(
"order_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
3,
[ "/health", "/orders","/checkout"],
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
3,
["/checkout", "/health", "/orders"],
),
(
"asc_lim2",
[build_order_by("endpoint", "asc")],
2,
2,
["/checkout", "/health"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
3,
["/orders", "/health", "/checkout"],
),
(
"desc_lim2",
[build_order_by("endpoint", "desc")],
2,
2,
["/orders", "/health"],
),
(
"asc_metric_name",
[build_order_by("p90(test_histogram_p90_groupby_asc_metric_name)", "asc")],
None,
3,
["/checkout", "/health", "/orders"], ## health and orders have the same size so they are then sorted endpoint as a tiebreaker
),
(
"asc_metric_name_lim2",
[build_order_by("p90(test_histogram_p90_groupby_asc_metric_name_lim2)", "asc")],
2,
2,
["/checkout", "/health"],
),
(
"desc_metric_name",
[build_order_by("p90(test_histogram_p90_groupby_desc_metric_name)", "desc")],
None,
3,
["/health", "/orders", "/checkout"], ## health and orders have the same size so they are then sorted endpoint as a tiebreaker
),
(
"desc_metric_name_lim2",
[build_order_by("p90(test_histogram_p90_groupby_desc_metric_name_lim2)", "desc")],
2,
2,
["/health", "/orders"],
),
],
)
def test_histogram_percentile_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
order_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = f"test_histogram_p90_groupby_{order_suffix}"
metrics = Metrics.load_from_file(
FILE,
base_time=now - timedelta(minutes=60),
metric_name_override=metric_name,
)
insert_metrics(metrics)
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
query_p90 = build_builder_query(
"A",
metric_name,
"doesnotreallymatter",
"p90",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query_p90])
assert response.status_code == HTTPStatus.OK
data = response.json()
p90_series = get_all_series(data, "A")
assert (
len(p90_series) == expected_count
), f"Expected {expected_count} p90 series, got {len(p90_series)}"
_assert_series_endpoint_labels(p90_series, expected_endpoints, "p90")
p90_values = {}
for series in p90_series:
endpoint = series.get("labels", [{}])[0].get("value", "unknown")
p90_values[endpoint] = sorted(
series.get("values", []), key=lambda x: x["timestamp"]
)
for endpoint, values in p90_values.items():
for v in values:
assert v["value"] >= 0, f"p90 for {endpoint} should not be negative: {v['value']}"
# /health (cumulative, service=api)
if "/health" in p90_values:
vals = p90_values["/health"]
assert vals[0]["value"] == 6400, f"Expected /health p90 first=6400, got {vals[0]['value']}"
assert vals[-1]["value"] == 991.304, f"Expected /health p90 last=991.304, got {vals[-1]['value']}"
# /orders (cumulative, service=api): same distribution as /health
if "/orders" in p90_values:
vals = p90_values["/orders"]
assert vals[0]["value"] == 6400, f"Expected /orders p90 first=6400, got {vals[0]['value']}"
assert vals[-1]["value"] == 991.304, f"Expected /orders p90 last=991.304, got {vals[-1]['value']}"
# /checkout (delta, service=web): 60 points
if "/checkout" in p90_values:
vals = p90_values["/checkout"]
assert vals[0]["value"] == 900, f"Expected /checkout p90 zeroth=900, got {vals[0]['value']}"
assert vals[1]["value"] == 6400, f"Expected /checkout p90 first=6400, got {vals[1]['value']}"
assert vals[-1]["value"] == 991.304, f"Expected /checkout p90 last=991.304, got {vals[-1]['value']}"

View File

@@ -5,13 +5,16 @@ Look at the delta_counters_1h.jsonl file for the relevant data
import os
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Callable, List, Optional, Union
import pytest
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
build_builder_query,
build_order_by,
get_all_series,
get_series_values,
make_query_request,
@@ -69,16 +72,61 @@ def test_rate_with_steady_values_and_reset(
assert v["value"] >= 0, f"Rate should not be negative: {v['value']}"
@pytest.mark.parametrize(
"order_suffix,order_by,limit,expected_count,expected_endpoints",
[
(
"no_order",
None,
None,
5,
{"/products", "/health", "/checkout", "/orders", "/users"},
),
(
"asc",
[build_order_by("endpoint", "asc")],
None,
5,
["/checkout", "/health", "/orders", "/products", "/users"],
),
(
"asc_lim3",
[build_order_by("endpoint", "asc")],
3,
3,
["/checkout", "/health", "/orders"],
),
(
"desc",
[build_order_by("endpoint", "desc")],
None,
5,
["/users", "/products", "/orders", "/health", "/checkout"],
),
(
"desc_lim3",
[build_order_by("endpoint", "desc")],
3,
3,
["/users", "/products", "/orders"],
),
],
)
def test_rate_group_by_endpoint(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_metrics: Callable[[List[Metrics]], None],
order_suffix: str,
order_by: Optional[List],
limit: Optional[int],
expected_count: int,
expected_endpoints: Union[set, List[str]],
) -> None:
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
metric_name = "test_rate_groupby"
metric_name = f"test_rate_groupby_{order_suffix}"
metrics = Metrics.load_from_file(
DELTA_COUNTERS_FILE,
@@ -94,6 +142,8 @@ def test_rate_group_by_endpoint(
"rate",
"sum",
group_by=["endpoint"],
order_by=order_by,
limit=limit,
)
response = make_query_request(signoz, token, start_ms, end_ms, [query])
@@ -102,10 +152,23 @@ def test_rate_group_by_endpoint(
data = response.json()
all_series = get_all_series(data, "A")
# Series count should match the parametrized expectation
assert (
len(all_series) == expected_count
), f"Expected {expected_count} series, got {len(all_series)}"
endpoint_labels = [
series.get("labels", [{}])[0].get("value", "unknown")
for series in all_series
]
if isinstance(expected_endpoints, set):
assert (
set(endpoint_labels) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_labels)}"
else:
assert endpoint_labels == expected_endpoints, (
f"Expected endpoints {expected_endpoints}, got {endpoint_labels}"
)
# endpoint -> values
endpoint_values = {}
@@ -114,11 +177,6 @@ def test_rate_group_by_endpoint(
values = sorted(series.get("values", []), key=lambda x: x["timestamp"])
endpoint_values[endpoint] = values
expected_endpoints = {"/products", "/health", "/checkout", "/orders", "/users"}
assert (
set(endpoint_values.keys()) == expected_endpoints
), f"Expected endpoints {expected_endpoints}, got {set(endpoint_values.keys())}"
# the rate should never be negative
for endpoint, values in endpoint_values.items():
for v in values:
@@ -128,93 +186,95 @@ def test_rate_group_by_endpoint(
# /health: 60 data points (t01-t60), steady +10/min
# rate = 10/60 = 0.167
health_values = endpoint_values["/health"]
assert (
len(health_values) == 60
), f"Expected 60 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health == 60
), f"Expected == 60 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
if "/health" in endpoint_values:
health_values = endpoint_values["/health"]
assert (
len(health_values) == 60
), f"Expected 60 values for /health, got {len(health_values)}"
count_steady_health = sum(1 for v in health_values if v["value"] == 0.167)
assert (
count_steady_health == 60
), f"Expected == 60 steady rate values (0.167) for /health, got {count_steady_health}"
# all /health rates should be 0.167 except possibly first/last due to boundaries
for v in health_values[1:-1]:
assert v["value"] == 0.167, f"Expected /health rate 0.167, got {v['value']}"
# /products: 51 data points with 10-minute gap (t20-t29 missing), steady +20/min
# rate = 20/60 = 0.333, gap causes lower averaged rate at boundary
products_values = endpoint_values["/products"]
assert (
len(products_values) == 51
), f"Expected 51 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
assert (
count_steady_products == 51
), f"Expected 51 steady rate values (0.333) for /products, got {count_steady_products}"
if "/products" in endpoint_values:
products_values = endpoint_values["/products"]
assert (
len(products_values) == 51
), f"Expected 51 values for /products, got {len(products_values)}"
count_steady_products = sum(1 for v in products_values if v["value"] == 0.333)
assert (
count_steady_products == 51
), f"Expected 51 steady rate values (0.333) for /products, got {count_steady_products}"
# /checkout: 61 data points (t00-t60), +1/min normal, +50/min spike at t40-t44
# normal rate = 1/60 = 0.0167, spike rate = 50/60 = 0.833
checkout_values = endpoint_values["/checkout"]
assert (
len(checkout_values) == 61
), f"Expected 61 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout == 56
), f"Expected 56 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout == 5
), f"Expected 5 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate[Any](checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) == 5, f"Expected 5 spike indices, got {spike_indices}"
# consecutiveness
for i in range(1, len(spike_indices)):
if "/checkout" in endpoint_values:
checkout_values = endpoint_values["/checkout"]
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
len(checkout_values) == 61
), f"Expected 61 values for /checkout, got {len(checkout_values)}"
count_steady_checkout = sum(1 for v in checkout_values if v["value"] == 0.0167)
assert (
count_steady_checkout == 56
), f"Expected 56 steady rate values (0.0167) for /checkout, got {count_steady_checkout}"
# check that spike values exist (traffic spike +50/min at t40-t44)
count_spike_checkout = sum(1 for v in checkout_values if v["value"] == 0.833)
assert (
count_spike_checkout == 5
), f"Expected 5 spike rate values (0.833) for /checkout, got {count_spike_checkout}"
# spike values should be consecutive
spike_indices = [
i for i, v in enumerate(checkout_values) if v["value"] == 0.833
]
assert len(spike_indices) == 5, f"Expected 5 spike indices, got {spike_indices}"
for i in range(1, len(spike_indices)):
assert (
spike_indices[i] == spike_indices[i - 1] + 1
), f"Spike indices should be consecutive, got {spike_indices}"
# /orders: 60 data points (t00-t60) with gap at t30, counter reset at t31 (150->2)
# rate = 5/60 = 0.0833
# reset at t31 causes: rate at t30 includes gap (lower), t31 has high rate after reset
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) == 60
), f"Expected 59 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders == 58
), f"Expected 58 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) == 2
), f"Expected 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) == 1
), f"Expected one high rate value after counter reset, got {non_standard_orders}"
if "/orders" in endpoint_values:
orders_values = endpoint_values["/orders"]
assert (
len(orders_values) == 60
), f"Expected 59 values for /orders, got {len(orders_values)}"
count_steady_orders = sum(1 for v in orders_values if v["value"] == 0.0833)
assert (
count_steady_orders == 58
), f"Expected 58 steady rate values (0.0833) for /orders, got {count_steady_orders}"
# check for counter reset effects - there should be some non-standard values
non_standard_orders = [v["value"] for v in orders_values if v["value"] != 0.0833]
assert (
len(non_standard_orders) == 2
), f"Expected 2 non-standard values due to counter reset, got {non_standard_orders}"
# post-reset value should be higher (new counter value / interval)
high_rate_orders = [v for v in non_standard_orders if v > 0.0833]
assert (
len(high_rate_orders) == 1
), f"Expected one high rate value after counter reset, got {non_standard_orders}"
# /users: 56 data points (t05-t60), sparse +1 every 5 minutes (12 of them)
# Rate = 1/60 = 0.0167 during increment, 0 during flat periods
users_values = endpoint_values["/users"]
assert (
len(users_values) == 56
), f"Expected 56 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users == 44
), f"Expected 44 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate == 12
), f"Expected 12 increment rate values (0.0167) for /users, got {count_increment_rate}"
if "/users" in endpoint_values:
users_values = endpoint_values["/users"]
assert (
len(users_values) == 56
), f"Expected 56 values for /users, got {len(users_values)}"
count_zero_users = sum(1 for v in users_values if v["value"] == 0)
# most values should be 0 (flat periods between increments)
assert (
count_zero_users == 44
), f"Expected 44 zero rate values for /users (sparse data), got {count_zero_users}"
# non-zero values should be 0.0167 (1/60 increment rate)
non_zero_users = [v["value"] for v in users_values if v["value"] != 0]
count_increment_rate = sum(1 for v in non_zero_users if v == 0.0167)
assert (
count_increment_rate == 12
), f"Expected 12 increment rate values (0.0167) for /users, got {count_increment_rate}"