Mirror of https://github.com/SigNoz/signoz.git (synced 2026-02-21 00:02:41 +00:00)

Compare commits: 17 commits (fix/chart-...nv/6703)
| Author | SHA1 | Date |
|---|---|---|
| | 4bd5f804ec | |
| | a349b6935d | |
| | 5780fac789 | |
| | 3450a98fdf | |
| | 4fa5c42a36 | |
| | 3555a73ac0 | |
| | ed2d8d06cb | |
| | 203318a206 | |
| | 67fbf5a548 | |
| | 195dd36078 | |
| | 2c12f0bd03 | |
| | fcbb1843d9 | |
| | 00dff5c930 | |
| | c7e5258329 | |
| | 84a7be22f7 | |
| | c5923f942f | |
| | f91eee7e50 | |
@@ -749,6 +749,14 @@ components:
       - temporality
       - isMonotonic
       type: object
+    MetrictypesComparisonSpaceAggregationParam:
+      properties:
+        operator:
+          type: string
+        threshold:
+          format: double
+          type: number
+      type: object
     MetrictypesSpaceAggregation:
       enum:
       - sum
@@ -761,6 +769,7 @@ components:
       - p90
       - p95
       - p99
+      - histogram_count
       type: string
     MetrictypesTemporality:
       enum:
@@ -1045,6 +1054,8 @@ components:
       type: object
     Querybuildertypesv5MetricAggregation:
       properties:
+        comparisonSpaceAggregationParam:
+          $ref: '#/components/schemas/MetrictypesComparisonSpaceAggregationParam'
         metricName:
           type: string
         reduceTo:
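Concretely, an aggregation object that exercises the new schema would carry a payload like the following sketch; the metric name and threshold values here are illustrative, not taken from the diff:

```python
# Illustrative payload matching the new OpenAPI schema fields.
aggregation = {
    "metricName": "http_server_duration_bucket",  # hypothetical metric
    "spaceAggregation": "histogram_count",
    "comparisonSpaceAggregationParam": {
        "operator": "<=",    # MetrictypesComparisonSpaceAggregationParam.operator
        "threshold": 500.0,  # double, per the schema
    },
}
```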
@@ -927,6 +927,18 @@ export interface MetricsexplorertypesUpdateMetricMetadataRequestDTO {
   unit: string;
 }
 
+export interface MetrictypesComparisonSpaceAggregationParamDTO {
+  /**
+   * @type string
+   */
+  operator?: string;
+  /**
+   * @type number
+   * @format double
+   */
+  threshold?: number;
+}
+
 export enum MetrictypesSpaceAggregationDTO {
   sum = 'sum',
   avg = 'avg',
@@ -938,6 +950,7 @@ export enum MetrictypesSpaceAggregationDTO {
   p90 = 'p90',
   p95 = 'p95',
   p99 = 'p99',
+  histogram_count = 'histogram_count',
 }
 export enum MetrictypesTemporalityDTO {
   delta = 'delta',
@@ -1288,6 +1301,7 @@ export interface Querybuildertypesv5LogAggregationDTO {
 }
 
 export interface Querybuildertypesv5MetricAggregationDTO {
+  comparisonSpaceAggregationParam?: MetrictypesComparisonSpaceAggregationParamDTO;
   /**
    * @type string
    */
@@ -80,11 +80,16 @@ func (q *builderQuery[T]) Fingerprint() string {
 		case qbtypes.LogAggregation:
 			aggParts = append(aggParts, a.Expression)
 		case qbtypes.MetricAggregation:
-			aggParts = append(aggParts, fmt.Sprintf("%s:%s:%s:%s",
+			var spaceAggParamStr string
+			if a.ComparisonSpaceAggregationParam != nil {
+				spaceAggParamStr = a.ComparisonSpaceAggregationParam.StringValue()
+			}
+			aggParts = append(aggParts, fmt.Sprintf("%s:%s:%s:%s:%s",
 				a.MetricName,
 				a.Temporality.StringValue(),
 				a.TimeAggregation.StringValue(),
 				a.SpaceAggregation.StringValue(),
+				spaceAggParamStr,
 			))
 		}
 	}
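The extra fingerprint component matters for caching: two histogram_count queries that differ only in their comparison param must not share a cache entry. A minimal sketch of the idea; the helper and the values are illustrative, not the SigNoz API:

```python
# Illustrative only: mirrors the "%s:%s:%s:%s:%s" composite key above.
def fingerprint(metric: str, temporality: str, time_agg: str,
                space_agg: str, param_str: str = "") -> str:
    return ":".join([metric, temporality, time_agg, space_agg, param_str])

a = fingerprint("test_bucket", "cumulative", "rate", "histogram_count",
                '{"operator": "<=", "limit": "500.000000"}')
b = fingerprint("test_bucket", "cumulative", "rate", "histogram_count",
                '{"operator": "<=", "limit": "1000.000000"}')
assert a != b  # different thresholds now yield different cache keys
```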
@@ -123,7 +123,8 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
 	origTimeAgg := query.Aggregations[0].TimeAggregation
 	origGroupBy := slices.Clone(query.GroupBy)
 
-	if query.Aggregations[0].SpaceAggregation.IsPercentile() &&
+	if (query.Aggregations[0].SpaceAggregation.IsPercentile() ||
+		query.Aggregations[0].SpaceAggregation == metrictypes.SpaceAggregationHistogramCount) &&
 		query.Aggregations[0].Type != metrictypes.ExpHistogramType {
 		// add le in the group by if doesn't exist
 		leExists := false
@@ -154,7 +155,11 @@ func (b *MetricQueryStatementBuilder) buildPipelineStatement(
 		}
 
 		// make the time aggregation rate and space aggregation sum
-		query.Aggregations[0].TimeAggregation = metrictypes.TimeAggregationRate
+		if query.Aggregations[0].SpaceAggregation.IsPercentile() {
+			query.Aggregations[0].TimeAggregation = metrictypes.TimeAggregationRate
+		} else {
+			query.Aggregations[0].TimeAggregation = metrictypes.TimeAggregationIncrease
+		}
 		query.Aggregations[0].SpaceAggregation = metrictypes.SpaceAggregationSum
 	}
 
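The branch keeps `rate` for percentiles but switches histogram_count to `increase`, presumably because quantile estimation works on per-second bucket rates while histogram_count asks how many observations fell in each step. A toy illustration of the difference on a cumulative counter; the sample values are made up:

```python
# A toy cumulative bucket counter sampled once a minute.
samples = [0.0, 120.0, 300.0]
window_seconds = 120.0

increase = samples[-1] - samples[0]  # 300.0 observations in the window
rate = increase / window_seconds     # 2.5 observations per second
```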
@@ -524,7 +529,7 @@ func (b *MetricQueryStatementBuilder) buildSpatialAggregationCTE(
 		return "", nil, errors.Newf(
 			errors.TypeInvalidInput,
 			errors.CodeInvalidInput,
-			"invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`, `p50`, `p75`, `p90`, `p95`, `p99`]",
+			"invalid space aggregation, should be one of the following: [`sum`, `avg`, `min`, `max`, `count`, `p50`, `p75`, `p90`, `p95`, `p99`, `histogram_count`]",
 		)
 	}
 	sb := sqlbuilder.NewSelectBuilder()
@@ -577,6 +582,29 @@ func (b *MetricQueryStatementBuilder) BuildFinalSelect(
 		sb.From("__spatial_aggregation_cte")
 		sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
 		sb.GroupBy("ts")
 		if query.Having != nil && query.Having.Expression != "" {
 			rewriter := querybuilder.NewHavingExpressionRewriter()
 			rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
 			sb.Having(rewrittenExpr)
 		}
+	} else if query.Aggregations[0].SpaceAggregation == metrictypes.SpaceAggregationHistogramCount {
+		sb.Select("ts")
+
+		for _, g := range query.GroupBy {
+			sb.SelectMore(fmt.Sprintf("`%s`", g.TelemetryFieldKey.Name))
+		}
+
+		aggQuery, err := AggregationQueryForHistogramCount(query.Aggregations[0].ComparisonSpaceAggregationParam)
+		if err != nil {
+			return nil, err
+		}
+		sb.SelectMore(aggQuery)
+
+		sb.From("__spatial_aggregation_cte")
+
+		sb.GroupBy(querybuilder.GroupByKeys(query.GroupBy)...)
+		sb.GroupBy("ts")
+
+		if query.Having != nil && query.Having.Expression != "" {
+			rewriter := querybuilder.NewHavingExpressionRewriter()
+			rewrittenExpr := rewriter.RewriteForMetrics(query.Having.Expression, query.Aggregations)
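Assembled, the histogram_count branch produces a final select of roughly this shape; a sketch assuming one group-by key named `service`, with the interpolation expression standing in for whatever AggregationQueryForHistogramCount (shown below) returns:

```python
# Hypothetical output shape; the real SQL is assembled via sqlbuilder.
final_select = """
SELECT
    ts,
    `service`,
    <interpolated-bucket-count-expression> AS value
FROM __spatial_aggregation_cte
GROUP BY `service`, ts
"""
```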
@@ -1,6 +1,7 @@
 package telemetrymetrics
 
 import (
+	"fmt"
 	"time"
 
 	"github.com/SigNoz/signoz/pkg/errors"
@@ -308,3 +309,20 @@ func AggregationColumnForSamplesTable(
 	}
 	return aggregationColumn, nil
 }
+
+func AggregationQueryForHistogramCount(param *metrictypes.ComparisonSpaceAggregationParam) (string, error) {
+	if param == nil {
+		return "", errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "no aggregation param provided for histogram count")
+	}
+	histogramCountThreshold := param.Threshold
+
+	switch param.Operater {
+	case "<=":
+		return fmt.Sprintf("argMaxIf(value, toFloat64(le), toFloat64(le) <= %f) + (argMinIf(value, toFloat64(le), toFloat64(le) > %f) - argMaxIf(value, toFloat64(le), toFloat64(le) <= %f)) * (%f - maxIf(toFloat64(le), toFloat64(le) <= %f)) / (minIf(toFloat64(le), toFloat64(le) > %f) - maxIf(toFloat64(le), toFloat64(le) <= %f)) AS value", histogramCountThreshold, histogramCountThreshold, histogramCountThreshold, histogramCountThreshold, histogramCountThreshold, histogramCountThreshold, histogramCountThreshold), nil
+	case ">":
+		return fmt.Sprintf("argMax(value, toFloat64(le)) - (argMaxIf(value, toFloat64(le), toFloat64(le) <= %f) + (argMinIf(value, toFloat64(le), toFloat64(le) > %f) - argMaxIf(value, toFloat64(le), toFloat64(le) <= %f)) * (%f - maxIf(toFloat64(le), toFloat64(le) <= %f)) / (minIf(toFloat64(le), toFloat64(le) > %f) - maxIf(toFloat64(le), toFloat64(le) <= %f))) AS value", histogramCountThreshold, histogramCountThreshold, histogramCountThreshold, histogramCountThreshold, histogramCountThreshold, histogramCountThreshold, histogramCountThreshold), nil
+	default:
+		return "", errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "invalid space aggregation operator, should be one of the following: [`<=`, `>`]")
+	}
+
+}
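The two SQL expressions implement linear interpolation over cumulative bucket counts: take the cumulative count at the largest bucket bound at or below the threshold, then interpolate toward the count at the smallest bound above it; the `>` case subtracts that result from the total. A Python sketch of the same math, using hypothetical bucket bounds and counts (not the test fixture's actual data):

```python
import math

def histogram_count_le(buckets, threshold):
    """Observations <= threshold, interpolated within the enclosing bucket.

    buckets: sorted (le, cumulative_count) pairs; the last le is +Inf.
    Mirrors the argMaxIf/argMinIf expression in the `<=` case above.
    """
    below = [(le, v) for le, v in buckets if le <= threshold] or [(0.0, 0.0)]
    above = [(le, v) for le, v in buckets if le > threshold]
    lo_le, lo_v = below[-1]
    hi_le, hi_v = above[0]
    # Beyond the largest finite bucket, hi_le is +Inf, the interpolation
    # term vanishes, and every such threshold reports the same value.
    return lo_v + (hi_v - lo_v) * (threshold - lo_le) / (hi_le - lo_le)

def histogram_count_gt(buckets, threshold):
    # The `>` case: total observations minus those <= threshold.
    return buckets[-1][1] - histogram_count_le(buckets, threshold)

buckets = [(100.0, 1.1), (1000.0, 11.0), (8000.0, 17.0), (math.inf, 18.0)]
print(histogram_count_le(buckets, 4500.0))   # 14.0: midway between 11 and 17
print(histogram_count_le(buckets, 80000.0))  # 17.0: beyond the largest finite bound
```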
@@ -2,6 +2,7 @@ package metrictypes
 
 import (
 	"database/sql/driver"
+	"fmt"
 	"strings"
 
 	"github.com/SigNoz/signoz/pkg/errors"
@@ -189,17 +190,18 @@ type SpaceAggregation struct {
 }
 
 var (
-	SpaceAggregationUnspecified  = SpaceAggregation{valuer.NewString("")}
-	SpaceAggregationSum          = SpaceAggregation{valuer.NewString("sum")}
-	SpaceAggregationAvg          = SpaceAggregation{valuer.NewString("avg")}
-	SpaceAggregationMin          = SpaceAggregation{valuer.NewString("min")}
-	SpaceAggregationMax          = SpaceAggregation{valuer.NewString("max")}
-	SpaceAggregationCount        = SpaceAggregation{valuer.NewString("count")}
-	SpaceAggregationPercentile50 = SpaceAggregation{valuer.NewString("p50")}
-	SpaceAggregationPercentile75 = SpaceAggregation{valuer.NewString("p75")}
-	SpaceAggregationPercentile90 = SpaceAggregation{valuer.NewString("p90")}
-	SpaceAggregationPercentile95 = SpaceAggregation{valuer.NewString("p95")}
-	SpaceAggregationPercentile99 = SpaceAggregation{valuer.NewString("p99")}
+	SpaceAggregationUnspecified    = SpaceAggregation{valuer.NewString("")}
+	SpaceAggregationSum            = SpaceAggregation{valuer.NewString("sum")}
+	SpaceAggregationAvg            = SpaceAggregation{valuer.NewString("avg")}
+	SpaceAggregationMin            = SpaceAggregation{valuer.NewString("min")}
+	SpaceAggregationMax            = SpaceAggregation{valuer.NewString("max")}
+	SpaceAggregationCount          = SpaceAggregation{valuer.NewString("count")}
+	SpaceAggregationPercentile50   = SpaceAggregation{valuer.NewString("p50")}
+	SpaceAggregationPercentile75   = SpaceAggregation{valuer.NewString("p75")}
+	SpaceAggregationPercentile90   = SpaceAggregation{valuer.NewString("p90")}
+	SpaceAggregationPercentile95   = SpaceAggregation{valuer.NewString("p95")}
+	SpaceAggregationPercentile99   = SpaceAggregation{valuer.NewString("p99")}
+	SpaceAggregationHistogramCount = SpaceAggregation{valuer.NewString("histogram_count")}
 )
 
 func (SpaceAggregation) Enum() []any {
@@ -214,6 +216,7 @@ func (SpaceAggregation) Enum() []any {
 		SpaceAggregationPercentile90,
 		SpaceAggregationPercentile95,
 		SpaceAggregationPercentile99,
+		SpaceAggregationHistogramCount,
 	}
 }
 
@@ -256,3 +259,12 @@ type MetricTableHints struct {
 type MetricValueFilter struct {
 	Value float64
 }
+
+type ComparisonSpaceAggregationParam struct {
+	Operater  string  `json:"operator"`
+	Threshold float64 `json:"threshold"`
+}
+
+func (param ComparisonSpaceAggregationParam) StringValue() string {
+	return fmt.Sprintf("{\"operator\": \"%s\", \"limit\": \"%f\"}", param.Operater, param.Threshold)
+}
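Given that format string, StringValue serializes the param with the threshold under a `limit` key. For operator `<=` and threshold 500 it yields the fragment shown below, mirrored here in Python for illustration:

```python
def string_value(operator: str, threshold: float) -> str:
    # Mirrors the Go fmt.Sprintf above, including the "limit" key.
    return '{"operator": "%s", "limit": "%f"}' % (operator, threshold)

print(string_value("<=", 500.0))  # {"operator": "<=", "limit": "500.000000"}
```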
@@ -446,6 +446,8 @@ type MetricAggregation struct {
 	TimeAggregation metrictypes.TimeAggregation `json:"timeAggregation"`
 	// space aggregation to apply to the query
 	SpaceAggregation metrictypes.SpaceAggregation `json:"spaceAggregation"`
+	// param for space aggregation if needed
+	ComparisonSpaceAggregationParam *metrictypes.ComparisonSpaceAggregationParam `json:"comparisonSpaceAggregationParam"`
 	// table hints to use for the query
 	TableHints *metrictypes.MetricTableHints `json:"-"`
 	// value filter to apply to the query
@@ -52,6 +52,7 @@ def build_builder_query(
     time_aggregation: str,
     space_aggregation: str,
     *,
+    comparisonSpaceAggregationParam: Optional[Dict] = None,
     temporality: Optional[str] = None,
     step_interval: int = DEFAULT_STEP_INTERVAL,
     group_by: Optional[List[str]] = None,
@@ -74,7 +75,8 @@ def build_builder_query(
     }
     if temporality:
         spec["aggregations"][0]["temporality"] = temporality
-
+    if comparisonSpaceAggregationParam:
+        spec["aggregations"][0]["comparisonSpaceAggregationParam"] = comparisonSpaceAggregationParam
     if group_by:
        spec["groupBy"] = [
            {
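A caller passes the new keyword straight through to the aggregation spec, as the integration tests below do. For instance (metric name and threshold illustrative):

```python
query = build_builder_query(
    "A",
    "test_bucket",
    "rate",
    "histogram_count",
    comparisonSpaceAggregationParam={"operator": "<=", "threshold": 500},
)
```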
tests/integration/src/querier/08_metrics_histogram.py (259 lines, new file)
@@ -0,0 +1,259 @@
"""
Look at the histogram_data_1h.jsonl file for the relevant data
"""

import random
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List

import pytest

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.metrics import Metrics
from fixtures.querier import (
    build_builder_query,
    get_all_series,
    get_series_values,
    make_query_request,
)
from fixtures.utils import get_testdata_file_path

FILE = get_testdata_file_path("histogram_data_1h.jsonl")
@pytest.mark.parametrize(
    "threshold, operator, first_value, last_value",
    [
        (1000, "<=", 11, 69),
        (100, "<=", 1.1, 6.9),
        (7500, "<=", 16.75, 74.75),
        (8000, "<=", 17, 75),
        (80000, "<=", 17, 75),  # the top bucket is +Inf, so every threshold beyond the largest finite bucket reports the same value
        (1000, ">", 7, 7),
        (100, ">", 16.9, 69.1),
        (7500, ">", 1.25, 1.25),
        (8000, ">", 1, 1),
        (80000, ">", 1, 1),  # the top bucket is +Inf, so every threshold beyond the largest finite bucket reports the same value
    ],
)
def test_histogram_count_for_one_endpoint(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    threshold: float,
    operator: str,
    first_value: float,
    last_value: float,
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = "test_bucket"

    metrics = Metrics.load_from_file(
        FILE,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query = build_builder_query(
        "A",
        metric_name,
        "rate",
        "histogram_count",
        comparisonSpaceAggregationParam={
            "threshold": threshold,
            "operator": operator,
        },
        filter_expression='endpoint = "/health"',
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
    assert len(result_values) == 59
    assert result_values[0]["value"] == first_value
    assert result_values[-1]["value"] == last_value

@pytest.mark.parametrize(
    "threshold, operator, first_value, last_value",
    [
        (1000, "<=", 22, 138),
        (100, "<=", 2.2, 13.8),
        (7500, "<=", 33.5, 149.5),
        (8000, "<=", 34, 150),
        (80000, "<=", 34, 150),  # the top bucket is +Inf, so every threshold beyond the largest finite bucket reports the same value
        (1000, ">", 14, 14),
        (100, ">", 33.8, 138.2),
        (7500, ">", 2.5, 2.5),
        (8000, ">", 2, 2),
        (80000, ">", 2, 2),  # the top bucket is +Inf, so every threshold beyond the largest finite bucket reports the same value
    ],
)
def test_histogram_count_for_one_service(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    threshold: float,
    operator: str,
    first_value: float,
    last_value: float,
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = "test_bucket"

    metrics = Metrics.load_from_file(
        FILE,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query = build_builder_query(
        "A",
        metric_name,
        "rate",
        "histogram_count",
        comparisonSpaceAggregationParam={
            "threshold": threshold,
            "operator": operator,
        },
        filter_expression='service = "api"',
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
    assert len(result_values) == 59
    assert result_values[0]["value"] == first_value
    assert result_values[-1]["value"] == last_value

@pytest.mark.parametrize(
    "threshold, operator, first_value, last_value",
    [
        (1000, "<=", 11, 69),
        (100, "<=", 1.1, 6.9),
        (7500, "<=", 16.75, 74.75),
        (8000, "<=", 17, 75),
        (80000, "<=", 17, 75),  # the top bucket is +Inf, so every threshold beyond the largest finite bucket reports the same value
        (1000, ">", 7, 7),
        (100, ">", 16.9, 69.1),
        (7500, ">", 1.25, 1.25),
        (8000, ">", 1, 1),
        (80000, ">", 1, 1),  # the top bucket is +Inf, so every threshold beyond the largest finite bucket reports the same value
    ],
)
def test_histogram_count_for_delta_service(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    threshold: float,
    operator: str,
    first_value: float,
    last_value: float,
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = "test_bucket"

    metrics = Metrics.load_from_file(
        FILE,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query = build_builder_query(
        "A",
        metric_name,
        "rate",
        "histogram_count",
        comparisonSpaceAggregationParam={
            "threshold": threshold,
            "operator": operator,
        },
        filter_expression='service = "web"',
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
    assert len(result_values) == 60  # in delta, the value at 10:01 is also reported
    assert result_values[1]["value"] == first_value  # first_value refers to the value at 10:02, parallel to the cumulative cases
    assert result_values[-1]["value"] == last_value

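The 59-versus-60 point counts in these tests fall out of the arithmetic: a cumulative rate needs a previous sample as a baseline, so the first minute of the window yields no value, whereas every delta sample is already a per-interval value. A quick sanity check:

```python
minutes = 60
cumulative_points = minutes - 1  # 59: the first sample only seeds the rate
delta_points = minutes           # 60: each delta sample stands alone
assert (cumulative_points, delta_points) == (59, 60)
```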
@pytest.mark.parametrize(
    "threshold, operator, first_value, last_value",
    [
        (1000, "<=", 33, 207),
        (100, "<=", 3.3, 20.7),
        (7500, "<=", 50.25, 224.25),
        (8000, "<=", 51, 225),
        (80000, "<=", 51, 225),
        (1000, ">", 21, 21),
        (100, ">", 50.7, 207.3),
        (7500, ">", 3.75, 3.75),
        (8000, ">", 3, 3),
        (80000, ">", 3, 3),
    ],
)
def test_histogram_count_for_all_services(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_metrics: Callable[[List[Metrics]], None],
    threshold: float,
    operator: str,
    first_value: float,
    last_value: float,
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)
    metric_name = "test_bucket"

    metrics = Metrics.load_from_file(
        FILE,
        base_time=now - timedelta(minutes=60),
        metric_name_override=metric_name,
    )
    insert_metrics(metrics)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query = build_builder_query(
        "A",
        metric_name,
        "rate",
        "histogram_count",
        comparisonSpaceAggregationParam={
            "threshold": threshold,
            "operator": operator,
        },
        # no service filter: this also exercises multi-temporality handling
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
    assert len(result_values) == 60
    assert result_values[1]["value"] == first_value  # first_value refers to the value at 10:02, parallel to the cumulative cases
    assert result_values[-1]["value"] == last_value
tests/integration/testdata/histogram_data_1h.jsonl (1440 lines, vendored, new file)
(File diff suppressed because it is too large.)