Mirror of https://github.com/SigNoz/signoz.git (synced 2026-03-01 19:42:55 +00:00)

Compare commits: feat/pinne...int-tests- (1 commit)

Commit 13117d2b03
@@ -15,6 +15,7 @@ pytest_plugins = [
     "fixtures.logs",
     "fixtures.traces",
     "fixtures.metrics",
+    "fixtures.meter",
     "fixtures.driver",
     "fixtures.idp",
     "fixtures.idputils",
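Registering "fixtures.meter" in pytest_plugins is what lets any test module request the new fixture below by name. A minimal sketch of a consuming test, assuming the fixture definitions in the new file that follows (the test itself is hypothetical, not part of this commit):

from datetime import datetime, timezone
from typing import Callable, List

from fixtures.meter import MeterSample, make_meter_samples


def test_example(insert_meter_samples: Callable[[List[MeterSample]], None]) -> None:
    # pytest resolves insert_meter_samples because "fixtures.meter" is registered above
    now = datetime.now(tz=timezone.utc)
    insert_meter_samples(make_meter_samples("demo_metric", {"service": "demo"}, now))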
tests/integration/fixtures/meter.py (new file, 121 lines)

@@ -0,0 +1,121 @@
import hashlib
import json
from datetime import datetime, timedelta
from typing import Any, Callable, Generator, List

import numpy as np
import pytest

from fixtures import types


class MeterSample:
    temporality: str
    metric_name: str
    description: str
    unit: str
    type: str
    is_monotonic: bool
    labels: str
    fingerprint: np.uint64
    unix_milli: np.int64
    value: np.float64

    def __init__(
        self,
        metric_name: str,
        labels: dict[str, str],
        timestamp: datetime,
        value: float,
        temporality: str = "Delta",
        description: str = "",
        unit: str = "",
        type_: str = "Sum",
        is_monotonic: bool = True,
    ) -> None:
        self.temporality = temporality
        self.metric_name = metric_name
        self.description = description
        self.unit = unit
        self.type = type_
        self.is_monotonic = is_monotonic
        self.labels = json.dumps(labels, separators=(",", ":"))
        self.unix_milli = np.int64(int(timestamp.timestamp() * 1e3))
        self.value = np.float64(value)

        fingerprint_str = metric_name + self.labels
        self.fingerprint = np.uint64(
            int(hashlib.md5(fingerprint_str.encode()).hexdigest()[:16], 16)
        )

    def to_samples_row(self) -> list:
        return [
            self.temporality,
            self.metric_name,
            self.description,
            self.unit,
            self.type,
            self.is_monotonic,
            self.labels,
            self.fingerprint,
            self.unix_milli,
            self.value,
        ]


def make_meter_samples(
    metric_name: str,
    labels: dict[str, str],
    now: datetime,
    count: int = 60,
    base_value: float = 100.0,
    **kwargs,
) -> List[MeterSample]:
    samples = []
    for i in range(count):
        ts = now - timedelta(minutes=count - i)
        samples.append(
            MeterSample(
                metric_name=metric_name,
                labels=labels,
                timestamp=ts,
                value=base_value + i,
                **kwargs,
            )
        )
    return samples


@pytest.fixture(name="insert_meter_samples", scope="function")
def insert_meter_samples(
    clickhouse: types.TestContainerClickhouse,
) -> Generator[Callable[[List[MeterSample]], None], Any, None]:
    def _insert_meter_samples(samples: List[MeterSample]) -> None:
        if len(samples) == 0:
            return

        clickhouse.conn.insert(
            database="signoz_meter",
            table="distributed_samples",
            column_names=[
                "temporality",
                "metric_name",
                "description",
                "unit",
                "type",
                "is_monotonic",
                "labels",
                "fingerprint",
                "unix_milli",
                "value",
            ],
            data=[s.to_samples_row() for s in samples],
        )

    yield _insert_meter_samples

    cluster = clickhouse.env["SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER"]
    for table in ["samples", "samples_agg_1d"]:
        clickhouse.conn.query(
            f"TRUNCATE TABLE signoz_meter.{table} ON CLUSTER '{cluster}' SYNC"
        )
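The fingerprint above is a stable 64-bit ID derived from the metric name plus its canonical JSON labels. The same computation as a standalone sketch, with illustrative values:

import hashlib
import json

import numpy as np

labels = json.dumps({"service": "api"}, separators=(",", ":"))  # canonical form, no spaces
digest = hashlib.md5(("my_metric" + labels).encode()).hexdigest()
fingerprint = np.uint64(int(digest[:16], 16))  # first 16 hex chars = 64 bits, fits uint64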
@@ -54,6 +54,7 @@ def build_builder_query(
     *,
     comparisonSpaceAggregationParam: Optional[Dict] = None,
     temporality: Optional[str] = None,
+    source: Optional[str] = None,
     step_interval: int = DEFAULT_STEP_INTERVAL,
     group_by: Optional[List[str]] = None,
     filter_expression: Optional[str] = None,
@@ -73,10 +74,14 @@
         "stepInterval": step_interval,
         "disabled": disabled,
     }
+    if source:
+        spec["source"] = source
     if temporality:
         spec["aggregations"][0]["temporality"] = temporality
     if comparisonSpaceAggregationParam:
-        spec["aggregations"][0]["comparisonSpaceAggregationParam"] = comparisonSpaceAggregationParam
+        spec["aggregations"][0][
+            "comparisonSpaceAggregationParam"
+        ] = comparisonSpaceAggregationParam
     if group_by:
         spec["groupBy"] = [
             {
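Putting the new knobs together: a meter query like the cost-meter test later in this commit (source="meter", temporality="delta") would yield a spec roughly like the sketch below. Only "source", "stepInterval", "disabled", and the two aggregations[0] fields are confirmed by these hunks; the rest of the payload shape is an assumption:

spec = {
    "aggregations": [
        {
            "temporality": "delta",  # set only when temporality= is passed
            "comparisonSpaceAggregationParam": {"threshold": 8000, "operator": "<="},
        }
    ],
    "stepInterval": 60,  # DEFAULT_STEP_INTERVAL is not visible in this view; 60 is a guess
    "disabled": False,
    "source": "meter",  # new in this commit: routes the query to the meter store
}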
@@ -2,7 +2,6 @@
 Look at the histogram_data_1h.jsonl file for the relevant data
 """

-import random
 from datetime import datetime, timedelta, timezone
 from http import HTTPStatus
 from typing import Callable, List
@@ -22,6 +21,7 @@ from fixtures.utils import get_testdata_file_path

 FILE = get_testdata_file_path("histogram_data_1h.jsonl")

+
 @pytest.mark.parametrize(
     "threshold, operator, first_value, last_value",
     [
@@ -29,12 +29,22 @@ FILE = get_testdata_file_path("histogram_data_1h.jsonl")
         (100, "<=", 1.1, 6.9),
         (7500, "<=", 16.75, 74.75),
         (8000, "<=", 17, 75),
-        (80000, "<=", 17, 75), ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
+        (
+            80000,
+            "<=",
+            17,
+            75,
+        ),  ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
         (1000, ">", 7, 7),
         (100, ">", 16.9, 69.1),
         (7500, ">", 1.25, 1.25),
         (8000, ">", 1, 1),
-        (80000, ">", 1, 1), ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
+        (
+            80000,
+            ">",
+            1,
+            1,
+        ),  ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
     ],
 )
 def test_histogram_count_for_one_endpoint(
@@ -65,10 +75,7 @@ def test_histogram_count_for_one_endpoint(
         metric_name,
         "increase",
         "count",
-        comparisonSpaceAggregationParam={
-            "threshold": threshold,
-            "operator": operator
-        },
+        comparisonSpaceAggregationParam={"threshold": threshold, "operator": operator},
         filter_expression='endpoint = "/health"',
     )
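The repeated "+Inf" comments in the parameters above describe a property of histogram bucket interpolation: a count for x <= t can be interpolated linearly inside a finite bucket, but never into the +Inf bucket, so every threshold above the largest finite bound yields the same count. A minimal sketch of that rule, with illustrative bucket bounds (not taken from histogram_data_1h.jsonl):

import bisect


def count_le(threshold: float, bounds: list, cum_counts: list) -> float:
    """Estimate how many observations are <= threshold from cumulative bucket counts.

    bounds are the finite upper bucket bounds, ascending; cum_counts[i] is the
    cumulative count at bounds[i]; everything above bounds[-1] falls in +Inf.
    """
    if threshold >= bounds[-1]:
        # No interpolation into +Inf: thresholds 8000 and 80000 give the same answer.
        return cum_counts[-1]
    i = bisect.bisect_left(bounds, threshold)
    lo_bound = bounds[i - 1] if i > 0 else 0.0
    lo_count = cum_counts[i - 1] if i > 0 else 0.0
    frac = (threshold - lo_bound) / (bounds[i] - lo_bound)
    return lo_count + frac * (cum_counts[i] - lo_count)


bounds, cum_counts = [1000.0, 8000.0], [10.0, 17.0]
assert count_le(8000, bounds, cum_counts) == count_le(80000, bounds, cum_counts) == 17.0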
@@ -81,6 +88,7 @@ def test_histogram_count_for_one_endpoint(
     assert result_values[0]["value"] == first_value
     assert result_values[-1]["value"] == last_value

+
 @pytest.mark.parametrize(
     "threshold, operator, first_value, last_value",
     [
@@ -88,12 +96,22 @@ def test_histogram_count_for_one_endpoint(
         (100, "<=", 2.2, 13.8),
         (7500, "<=", 33.5, 149.5),
         (8000, "<=", 34, 150),
-        (80000, "<=", 34, 150), ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
+        (
+            80000,
+            "<=",
+            34,
+            150,
+        ),  ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
         (1000, ">", 14, 14),
         (100, ">", 33.8, 138.2),
         (7500, ">", 2.5, 2.5),
         (8000, ">", 2, 2),
-        (80000, ">", 2, 2), ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
+        (
+            80000,
+            ">",
+            2,
+            2,
+        ),  ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
     ],
 )
 def test_histogram_count_for_one_service(
@@ -124,10 +142,7 @@ def test_histogram_count_for_one_service(
         metric_name,
         "increase",
         "count",
-        comparisonSpaceAggregationParam={
-            "threshold": threshold,
-            "operator": operator
-        },
+        comparisonSpaceAggregationParam={"threshold": threshold, "operator": operator},
         filter_expression='service = "api"',
     )
@@ -140,6 +155,7 @@ def test_histogram_count_for_one_service(
     assert result_values[0]["value"] == first_value
     assert result_values[-1]["value"] == last_value

+
 @pytest.mark.parametrize(
     "threshold, operator, zeroth_value, first_value, last_value",
     [
@@ -147,12 +163,24 @@ def test_histogram_count_for_one_service(
         (100, "<=", 1234.5, 1.1, 6.9),
         (7500, "<=", 12345, 16.75, 74.75),
         (8000, "<=", 12345, 17, 75),
-        (80000, "<=", 12345, 17, 75), ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
+        (
+            80000,
+            "<=",
+            12345,
+            17,
+            75,
+        ),  ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
         (1000, ">", 0, 7, 7),
         (100, ">", 11110.5, 16.9, 69.1),
         (7500, ">", 0, 1.25, 1.25),
         (8000, ">", 0, 1, 1),
-        (80000, ">", 0, 1, 1), ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
+        (
+            80000,
+            ">",
+            0,
+            1,
+            1,
+        ),  ## cuz we don't know the max value in infinity, all numbers beyond the biggest finite bucket will report the same answer
     ],
 )
 def test_histogram_count_for_delta_service(
@@ -184,10 +212,7 @@
         metric_name,
         "increase",
         "count",
-        comparisonSpaceAggregationParam={
-            "threshold": threshold,
-            "operator": operator
-        },
+        comparisonSpaceAggregationParam={"threshold": threshold, "operator": operator},
         filter_expression='service = "web"',
     )
@@ -196,11 +221,16 @@

     data = response.json()
     result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
-    assert len(result_values) == 60 ## in delta, the value at 10:01 will also be reported
+    assert (
+        len(result_values) == 60
+    )  ## in delta, the value at 10:01 will also be reported
     assert result_values[0]["value"] == zeroth_value
-    assert result_values[1]["value"] == first_value ## to keep parallel to the cumulative test cases, first_value refers to the value at 10:02
+    assert (
+        result_values[1]["value"] == first_value
+    )  ## to keep parallel to the cumulative test cases, first_value refers to the value at 10:02
     assert result_values[-1]["value"] == last_value


 @pytest.mark.parametrize(
     "threshold, operator, zeroth_value, first_value, last_value",
     [
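On the delta asserts above: with a cumulative counter, the first sample (10:01) only establishes a baseline, so increase() yields values from 10:02 onward; with delta temporality every sample is already an increase, so 10:01 reports a value too, hence the extra point and the zeroth_value/first_value naming. A toy comparison (values illustrative, not from the test data):

cumulative = [100, 105, 110, 115]  # samples one minute apart, t1..t4
increase_cumulative = [b - a for a, b in zip(cumulative, cumulative[1:])]  # 3 points: t2..t4
delta = [5, 5, 5, 5]  # each delta sample is already an increase
increase_delta = delta  # 4 points: t1..t4, one more than the cumulative series
print(len(increase_cumulative), len(increase_delta))  # 3 4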
@@ -245,10 +275,7 @@ def test_histogram_count_for_all_services(
         metric_name,
         "increase",
         "count",
-        comparisonSpaceAggregationParam={
-            "threshold": threshold,
-            "operator": operator
-        },
+        comparisonSpaceAggregationParam={"threshold": threshold, "operator": operator},
         ## no services filter, this tests for multitemporality handling as well
     )
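Since the all-services query mixes temporalities (see the comment above), the series must be brought into one space before summing. One standard approach, shown purely as an illustration and not as SigNoz's actual implementation, is to convert cumulative counters to per-interval deltas, treating any drop as a counter reset:

from typing import List


def cumulative_to_delta(samples: List[float]) -> List[float]:
    """Convert a cumulative counter series into per-interval increases.

    A value lower than its predecessor is treated as a counter reset, so the
    whole post-reset value counts as the increase (a common convention).
    """
    deltas = []
    for prev, cur in zip(samples, samples[1:]):
        deltas.append(cur - prev if cur >= prev else cur)
    return deltas


# After conversion, cumulative and delta series live in the same space and can be summed.
print(cumulative_to_delta([10, 12, 15, 3, 8]))  # [2, 3, 3, 5]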
@@ -257,11 +284,16 @@

     data = response.json()
     result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
-    assert len(result_values) == 60 ## in delta, the value at 10:01 will also be reported
+    assert (
+        len(result_values) == 60
+    )  ## in delta, the value at 10:01 will also be reported
     assert result_values[0]["value"] == zeroth_value
-    assert result_values[1]["value"] == first_value ## to keep parallel to the cumulative test cases, first_value refers to the value at 10:02
+    assert (
+        result_values[1]["value"] == first_value
+    )  ## to keep parallel to the cumulative test cases, first_value refers to the value at 10:02
     assert result_values[-1]["value"] == last_value


 def test_histogram_count_no_param(
     signoz: types.SigNoz,
     create_user_admin: None,  # pylint: disable=unused-argument
@@ -308,8 +340,26 @@ def test_histogram_count_no_param(
         set(le_buckets.keys()) == expected_buckets
     ), f"Expected endpoints {expected_buckets}, got {set(le_buckets.keys())}"

-    first_values = {"1000": 33, "1500": 36, "2000": 39, "4000": 42, "5000": 45, "6000": 48, "8000": 51, "+Inf": 54}
-    last_values = {"1000": 207, "1500": 210, "2000": 213, "4000": 216, "5000": 219, "6000": 222, "8000": 225, "+Inf": 228}
+    first_values = {
+        "1000": 33,
+        "1500": 36,
+        "2000": 39,
+        "4000": 42,
+        "5000": 45,
+        "6000": 48,
+        "8000": 51,
+        "+Inf": 54,
+    }
+    last_values = {
+        "1000": 207,
+        "1500": 210,
+        "2000": 213,
+        "4000": 216,
+        "5000": 219,
+        "6000": 222,
+        "8000": 225,
+        "+Inf": 228,
+    }

     for le, values in le_buckets.items():
         assert len(values) == 60
@@ -318,5 +368,7 @@ def test_histogram_count_no_param(
                 v["value"] >= 0
             ), f"Count for {le} should not be negative: {v['value']}"
         assert values[0]["value"] == 12345
-        assert values[1]["value"] == first_values[le] ## to keep parallel to the cumulative test cases, first_value refers to the value at 10:02
-        assert values[-1]["value"] == last_values[le]
+        assert (
+            values[1]["value"] == first_values[le]
+        )  ## to keep parallel to the cumulative test cases, first_value refers to the value at 10:02
+        assert values[-1]["value"] == last_values[le]
@@ -1,4 +1,3 @@
-import random
 from datetime import datetime, timedelta, timezone
 from http import HTTPStatus
 from typing import Callable, List

@@ -10,7 +9,6 @@ from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
 from fixtures.metrics import Metrics
 from fixtures.querier import (
     build_builder_query,
-    get_all_series,
     get_series_values,
     make_query_request,
 )

@@ -18,6 +16,7 @@ from fixtures.utils import get_testdata_file_path

 FILE = get_testdata_file_path("gauge_data_1h.jsonl")

+
 @pytest.mark.parametrize(
     "time_agg, space_agg, service, num_elements, start_val, first_val, twentieth_min_val, after_twentieth_min_val",
     [
@@ -50,7 +49,7 @@ def test_for_one_service(
     start_val: float,
     first_val: float,
     twentieth_min_val: float,
-    after_twentieth_min_val: float ## web service has a gap of 10 mins after the 20th minute
+    after_twentieth_min_val: float,  ## web service has a gap of 10 mins after the 20th minute
 ) -> None:
     now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
     start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
@@ -84,6 +83,7 @@ def test_for_one_service(
     assert result_values[19]["value"] == twentieth_min_val
     assert result_values[20]["value"] == after_twentieth_min_val

+
 @pytest.mark.parametrize(
     "time_agg, space_agg, start_val, first_val, twentieth_min_val, twenty_first_min_val, thirty_first_min_val",
     [
@@ -105,8 +105,8 @@ def test_for_multiple_aggregations(
     start_val: float,
     first_val: float,
     twentieth_min_val: float,
-    twenty_first_min_val: float, ## web service has a gap of 10 mins after the 20th minute
-    thirty_first_min_val: float
+    twenty_first_min_val: float,  ## web service has a gap of 10 mins after the 20th minute
+    thirty_first_min_val: float,
 ) -> None:
     now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
     start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
@@ -138,4 +138,4 @@ def test_for_multiple_aggregations(
     assert result_values[1]["value"] == first_val
     assert result_values[19]["value"] == twentieth_min_val
     assert result_values[20]["value"] == twenty_first_min_val
-    assert result_values[30]["value"] == thirty_first_min_val
\ No newline at end of file
+    assert result_values[30]["value"] == thirty_first_min_val
@@ -53,7 +53,9 @@ def test_rate_with_steady_values_and_reset(

     data = response.json()
     result_values = sorted(get_series_values(data, "A"), key=lambda x: x["timestamp"])
-    assert len(result_values) == 60 ## total 61 minutes covered, and 30th minute is missing
+    assert (
+        len(result_values) == 60
+    )  ## total 61 minutes covered, and 30th minute is missing
     assert (
         result_values[30]["value"] == 0.0333
     )  # reset happens and [30] is for 31st minute. 2/60 cuz delta divides by step interval
@@ -61,9 +63,7 @@ def test_rate_with_steady_values_and_reset(
         result_values[31]["value"] == 0.133
     )  # i.e 8/60 i.e 31st to 32nd minute changes
     count_of_steady_rate = sum(1 for v in result_values if v["value"] == 0.0833)
-    assert (
-        count_of_steady_rate == 58
-    )  # 1 reset + 1 high rate are excluded
+    assert count_of_steady_rate == 58  # 1 reset + 1 high rate are excluded
     # All rates should be non-negative (stale periods = 0 rate)
     for v in result_values:
         assert v["value"] >= 0, f"Rate should not be negative: {v['value']}"
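The magic numbers in this file are just per-minute increases divided by the 60-second step interval, as the inline comments say (the increases of 5, 2, and 8 are read off the comments, not the data file):

step_seconds = 60  # the tests use a 60-second step interval
assert round(5 / step_seconds, 4) == 0.0833  # the steady rate, asserted 58 times
assert round(2 / step_seconds, 4) == 0.0333  # the reset minute
assert round(8 / step_seconds, 3) == 0.133  # the catch-up minute right after the reset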
tests/integration/src/querier/11_cost_meter.py (new file, 109 lines)

@@ -0,0 +1,109 @@
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List

import requests

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.meter import MeterSample, make_meter_samples
from fixtures.querier import (
    build_builder_query,
    get_series_values,
    make_query_request,
)


def test_query_range_cost_meter(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_meter_samples: Callable[[List[MeterSample]], None],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)

    metric_name = "signoz_cost_test_query_range"
    labels = {"service": "test-service", "environment": "production"}

    samples = make_meter_samples(
        metric_name,
        labels,
        now,
        count=60,
        base_value=100.0,
        temporality="Delta",
        type_="Sum",
        is_monotonic=True,
    )
    insert_meter_samples(samples)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
    query = build_builder_query(
        "A",
        metric_name,
        "sum",
        "sum",
        source="meter",
        temporality="delta",
    )

    response = make_query_request(signoz, token, start_ms, end_ms, [query])
    assert response.status_code == HTTPStatus.OK

    data = response.json()
    result_values = get_series_values(data, "A")
    assert len(result_values) > 0, f"Expected non-empty results, got: {data}"

    for val in result_values:
        assert val["value"] >= 0, f"Expected non-negative value, got: {val['value']}"


def test_list_meter_metric_names(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_meter_samples: Callable[[List[MeterSample]], None],
) -> None:
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ms = int((now - timedelta(minutes=65)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)

    metric_name = "cost_test_list_metrics"
    labels = {"service": "billing-service"}

    samples = make_meter_samples(
        metric_name,
        labels,
        now,
        count=5,
        base_value=50.0,
        temporality="Delta",
        type_="Sum",
        is_monotonic=True,
    )
    insert_meter_samples(samples)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    response = requests.get(
        signoz.self.host_configs["8080"].get("/api/v2/metrics"),
        params={
            "start": start_ms,
            "end": end_ms,
            "limit": 100,
            "searchText": "cost_test_list",
        },
        headers={"authorization": f"Bearer {token}"},
        timeout=30,
    )

    assert response.status_code == HTTPStatus.OK

    data = response.json()
    metrics = data.get("data", {}).get("metrics", [])
    metric_names = [m["metricName"] for m in metrics]
    assert (
        metric_name in metric_names
    ), f"Expected {metric_name} in metric names, got: {metric_names}"