Compare commits


13 Commits

Author       SHA1        Message                                                Date
Naman Verma  0f403c82af  fix: lint fix in test                                  2026-04-27 12:54:16 +05:30
Naman Verma  bd9dccf97d  fix: pylint fix by adding new line                     2026-04-27 12:11:50 +05:30
Naman Verma  af9d6734d9  Merge branch 'main' into nv/4325                       2026-04-27 11:50:05 +05:30
Naman Verma  dfccfa269f  Merge branch 'main' into nv/4325                       2026-04-27 11:47:58 +05:30
Naman Verma  2772ab94d3  fix: no warnings or errors for internal metrics        2026-04-27 11:47:01 +05:30
Naman Verma  5eb2ab2cb8  chore: missing newline between tests                   2026-04-20 15:34:51 +05:30
Naman Verma  0de84488ff  Merge branch 'main' into nv/4325                       2026-04-20 15:33:36 +05:30
Naman Verma  4e05c86286  Merge branch 'main' into nv/4325                       2026-04-08 23:18:19 +05:30
Naman Verma  e0756e38eb  Merge branch 'main' into nv/4325                       2026-04-08 09:33:46 +05:30
Naman Verma  4e6de5c826  Merge branch 'main' into nv/4325                       2026-04-07 20:40:36 +05:30
Naman Verma  a687c61919  chore: py fmt add new line                             2026-04-06 11:10:04 +05:30
Naman Verma  4066425952  chore: lint fix by removing unused list                2026-04-06 10:34:43 +05:30
Naman Verma  2381cf1da0  fix: show warning for non-existent cost meter metrics  2026-04-06 09:58:24 +05:30
4 changed files with 60 additions and 91 deletions

View File

@@ -627,27 +627,17 @@ func convertTimeSeriesDataToScalar(tsData *qbtypes.TimeSeriesData, queryName str
 		return &qbtypes.ScalarData{QueryName: queryName}
 	}
-	// Series can have ragged label sets; build the column schema from the
-	// union of all label keys (first-seen order) and fill rows by key lookup.
-	keyOrder := []telemetrytypes.TelemetryFieldKey{}
-	keyIndex := map[string]int{}
-	for _, series := range tsData.Aggregations[0].Series {
-		for _, label := range series.Labels {
-			if _, ok := keyIndex[label.Key.Name]; ok {
-				continue
-			}
-			keyIndex[label.Key.Name] = len(keyOrder)
-			keyOrder = append(keyOrder, label.Key)
-		}
-	}
-	columns := make([]*qbtypes.ColumnDescriptor, 0, len(keyOrder)+len(tsData.Aggregations))
-	for _, key := range keyOrder {
-		columns = append(columns, &qbtypes.ColumnDescriptor{
-			TelemetryFieldKey: key,
-			QueryName: queryName,
-			Type: qbtypes.ColumnTypeGroup,
-		})
+	columns := []*qbtypes.ColumnDescriptor{}
+	// Add group columns from first series
+	if len(tsData.Aggregations[0].Series) > 0 {
+		for _, label := range tsData.Aggregations[0].Series[0].Labels {
+			columns = append(columns, &qbtypes.ColumnDescriptor{
+				TelemetryFieldKey: label.Key,
+				QueryName: queryName,
+				Type: qbtypes.ColumnTypeGroup,
+			})
+		}
 	}
 	// Add aggregation columns
@@ -665,18 +655,18 @@ func convertTimeSeriesDataToScalar(tsData *qbtypes.TimeSeriesData, queryName str
 		})
 	}
-	// Build rows.
-	groupColCount := len(keyOrder)
+	// Build rows
 	data := [][]any{}
 	for seriesIdx, series := range tsData.Aggregations[0].Series {
 		row := make([]any, len(columns))
-		// Place each label under its key's column (by lookup, not index).
-		for _, label := range series.Labels {
-			row[keyIndex[label.Key.Name]] = label.Value
+		// Add group values
+		for i, label := range series.Labels {
+			row[i] = label.Value
 		}
 		// Add aggregation values (last value)
+		groupColCount := len(series.Labels)
 		for aggIdx, agg := range tsData.Aggregations {
 			if seriesIdx < len(agg.Series) && len(agg.Series[seriesIdx].Values) > 0 {
 				lastValue := agg.Series[seriesIdx].Values[len(agg.Series[seriesIdx].Values)-1].Value

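Side note on the column scheme being restored above: columns come from the first series only and rows are filled positionally (row[i] = label.Value), so a series whose label set differs from the first series' can land values under the wrong column, which is the ragged-labels case the removed code handled by key lookup. A minimal sketch of the positional pitfall, using simplified stand-in types rather than the actual qbtypes structs:

package main

import "fmt"

// label is a simplified stand-in for qbtypes.Label.
type label struct {
	key   string
	value any
}

func main() {
	// Column schema taken from the first series only.
	first := []label{{"label_1", "box-0"}, {"label_2", "rpc-0"}}
	cols := []string{}
	for _, l := range first {
		cols = append(cols, l.key)
	}

	// A ragged series that is missing label_1.
	orphan := []label{{"label_2", "rpc-1"}}

	// Positional placement puts rpc-1 under label_1.
	row := make([]any, len(cols))
	for i, l := range orphan {
		row[i] = l.value
	}
	fmt.Println(cols, row) // [label_1 label_2] [rpc-1 <nil>]
}
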
View File

@@ -1,53 +0,0 @@
-package querier
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
-	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
-)
-
-// Multiple series with different number of labels, shouldn't panic and should align labels correctly.
-func TestConvertTimeSeriesDataToScalar_RaggedLabels(t *testing.T) {
-	label := func(name string, value any) *qbtypes.Label {
-		return &qbtypes.Label{
-			Key: telemetrytypes.TelemetryFieldKey{Name: name},
-			Value: value,
-		}
-	}
-	series := func(labels []*qbtypes.Label, value float64) *qbtypes.TimeSeries {
-		return &qbtypes.TimeSeries{
-			Labels: labels,
-			Values: []*qbtypes.TimeSeriesValue{{Timestamp: 1, Value: value}},
-		}
-	}
-	tsData := &qbtypes.TimeSeriesData{
-		QueryName: "A",
-		Aggregations: []*qbtypes.AggregationBucket{{
-			Index: 0,
-			Series: []*qbtypes.TimeSeries{
-				series([]*qbtypes.Label{label("label_1", "orphan-0")}, 20),
-				series([]*qbtypes.Label{label("label_1", "box-0"), label("label_2", "rpc-0")}, 10),
-			},
-		}},
-	}
-
-	var sd *qbtypes.ScalarData
-	require.NotPanics(t, func() {
-		sd = convertTimeSeriesDataToScalar(tsData, "A")
-	})
-	require.NotNil(t, sd)
-
-	require.Len(t, sd.Columns, 3)
-	assert.Equal(t, "label_1", sd.Columns[0].Name)
-	assert.Equal(t, "label_2", sd.Columns[1].Name)
-	assert.Equal(t, "__result_0", sd.Columns[2].Name)
-
-	require.Len(t, sd.Data, 2)
-	assert.Equal(t, []any{"orphan-0", nil, 20.0}, sd.Data[0])
-	assert.Equal(t, []any{"box-0", "rpc-0", 10.0}, sd.Data[1])
-}

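The expected rows in the deleted test above (["orphan-0", nil, 20.0] and ["box-0", "rpc-0", 10.0]) only hold under the key-lookup scheme that this compare removes along with the test. A self-contained sketch of that scheme, with the same simplified stand-in types as the previous example:

package main

import "fmt"

// label is a simplified stand-in for qbtypes.Label.
type label struct {
	key   string
	value any
}

func main() {
	series := [][]label{
		{{"label_1", "orphan-0"}},
		{{"label_1", "box-0"}, {"label_2", "rpc-0"}},
	}

	// Column schema from the union of all label keys, in first-seen order.
	keyIndex := map[string]int{}
	keys := []string{}
	for _, s := range series {
		for _, l := range s {
			if _, ok := keyIndex[l.key]; !ok {
				keyIndex[l.key] = len(keys)
				keys = append(keys, l.key)
			}
		}
	}

	// Place each value under its key's column; keys a series lacks stay nil.
	for _, s := range series {
		row := make([]any, len(keys))
		for _, l := range s {
			row[keyIndex[l.key]] = l.value
		}
		fmt.Println(row) // [orphan-0 <nil>] then [box-0 rpc-0]
	}
}
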
View File

@@ -426,9 +426,17 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
 	}
 	nonExistentMetrics := []string{}
 	var dormantMetricsWarningMsg string
-	if len(missingMetrics) > 0 {
-		lastSeenInfo, _ := q.metadataStore.FetchLastSeenInfoMulti(ctx, missingMetrics...)
-		for _, missingMetricName := range missingMetrics {
+	// internal metrics aren't user-controlled — skip errors/warnings for them since users can't act on them
+	isInternalMetric := func(n string) bool { return strings.HasPrefix(n, "signoz.") || strings.HasPrefix(n, "signoz_") }
+	externalMissingMetrics := make([]string, 0, len(missingMetrics))
+	for _, m := range missingMetrics {
+		if !isInternalMetric(m) {
+			externalMissingMetrics = append(externalMissingMetrics, m)
+		}
+	}
+	if len(externalMissingMetrics) > 0 {
+		lastSeenInfo, _ := q.metadataStore.FetchLastSeenInfoMulti(ctx, externalMissingMetrics...)
+		for _, missingMetricName := range externalMissingMetrics {
 			if ts, ok := lastSeenInfo[missingMetricName]; ok && ts > 0 {
 				continue
 			}
@@ -440,24 +448,22 @@ func (q *querier) QueryRange(ctx context.Context, orgID valuer.UUID, req *qbtype
 			return nil, errors.NewNotFoundf(errors.CodeNotFound, "the following metrics were not found: %s", strings.Join(nonExistentMetrics, ", "))
 		}
 		lastSeenStr := func(name string) string {
-			if ts, ok := lastSeenInfo[name]; ok && ts > 0 {
-				ago := humanize.RelTime(time.UnixMilli(ts), time.Now(), "ago", "from now")
-				return fmt.Sprintf("%s (last seen %s)", name, ago)
-			}
-			return name // this case won't come cuz lastSeenStr is never called for metrics in nonExistentMetrics
+			ts := lastSeenInfo[name]
+			ago := humanize.RelTime(time.UnixMilli(ts), time.Now(), "ago", "from now")
+			return fmt.Sprintf("%s (last seen %s)", name, ago)
 		}
-		if len(missingMetrics) == 1 {
-			dormantMetricsWarningMsg = fmt.Sprintf("no data found for the metric %s in the query time range", lastSeenStr(missingMetrics[0]))
+		if len(externalMissingMetrics) == 1 {
+			dormantMetricsWarningMsg = fmt.Sprintf("no data found for the metric %s in the query time range", lastSeenStr(externalMissingMetrics[0]))
 		} else {
-			parts := make([]string, len(missingMetrics))
-			for i, m := range missingMetrics {
+			parts := make([]string, len(externalMissingMetrics))
+			for i, m := range externalMissingMetrics {
 				parts[i] = lastSeenStr(m)
 			}
 			dormantMetricsWarningMsg = fmt.Sprintf("no data found for the following metrics in the query time range: %s", strings.Join(parts, ", "))
 		}
 	}
 	preseededResults := make(map[string]any)
-	for _, name := range missingMetricQueries { // at this point missing metrics will not have any non existent metrics, only normal ones
+	for _, name := range missingMetricQueries {
 		switch req.RequestType {
 		case qbtypes.RequestTypeTimeSeries:
 			preseededResults[name] = &qbtypes.TimeSeriesData{QueryName: name}

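The guard added above is a plain prefix check over the missing-metric names; everything downstream then operates on externalMissingMetrics instead of missingMetrics. A self-contained sketch of the same filtering logic, with a hardcoded sample list standing in for the querier's state:

package main

import (
	"fmt"
	"strings"
)

// isInternalMetric mirrors the check in the diff: metrics SigNoz emits about
// itself carry the "signoz." or "signoz_" prefix and are not user-actionable.
func isInternalMetric(name string) bool {
	return strings.HasPrefix(name, "signoz.") || strings.HasPrefix(name, "signoz_")
}

func main() {
	missing := []string{"signoz_calls_total", "http_requests_total", "signoz.db.latency"}
	external := make([]string, 0, len(missing))
	for _, m := range missing {
		if !isInternalMetric(m) {
			external = append(external, m)
		}
	}
	fmt.Println(external) // [http_requests_total]
}
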
View File

@@ -640,6 +640,32 @@ def test_non_existent_metrics_returns_404(
     assert get_error_message(response.json()) == "could not find the metric whatevergoennnsgoeshere"
 
 
+def test_non_existent_internal_metrics_returns_no_warning(
+    signoz: types.SigNoz,
+    create_user_admin: None,  # pylint: disable=unused-argument
+    get_token: Callable[[str, str], str],
+) -> None:
+    now = datetime.now(tz=UTC).replace(second=0, microsecond=0)
+    metric_name = "signoz_calls_total"
+    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
+
+    query = build_builder_query(
+        "A",
+        metric_name,
+        "doesnotreallymatter",
+        "sum",
+    )
+
+    end_ms = int(now.timestamp() * 1000)
+    start_2h = int((now - timedelta(hours=2)).timestamp() * 1000)
+
+    response = make_query_request(signoz, token, start_2h, end_ms, [query])
+    assert response.status_code == HTTPStatus.OK
+
+    data = response.json()
+    assert get_all_warnings(data) == []
 # Verify /api/v1/fields/values filters label values by metricNamespace prefix.
 # Inserts metrics under ns.a and ns.b, then asserts a specific prefix returns
 # only matching values while a common prefix returns both.