feat: extract context and data type from telemetry field name (#9986)
This pull request introduces significant improvements to the handling and normalization of telemetry field keys, adds comprehensive tests for these changes, and refactors JSON unmarshaling for query builder types to ensure consistent normalization and error handling.
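For illustration, here is a minimal usage sketch (not part of the diff, assuming the telemetrytypes package changed below): a field key written as resource.service.name:string is split into its field context, field name, and data type.

    // Hypothetical usage; TelemetryFieldKey.Normalize is added in this PR.
    key := telemetrytypes.TelemetryFieldKey{Name: "resource.service.name:string"}
    key.Normalize()
    // key.Name == "service.name"
    // key.FieldContext == telemetrytypes.FieldContextResource
    // key.FieldDataType == telemetrytypes.FieldDataTypeString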
@@ -1,6 +1,8 @@
package querybuildertypesv5

import (
+	"fmt"
+
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

@@ -131,12 +133,44 @@ func (q *QueryBuilderQuery[T]) UnmarshalJSON(data []byte) error {
	var temp Alias
	// Use UnmarshalJSONWithContext for better error messages
-	if err := UnmarshalJSONWithContext(data, &temp, "query spec"); err != nil {
+	if err := UnmarshalJSONWithContext(data, &temp, fmt.Sprintf("query spec for %T", q)); err != nil {
		return err
	}

	// Copy the decoded values back to the original struct
	*q = QueryBuilderQuery[T](temp)

	// Normalize the query after unmarshaling
	q.Normalize()
	return nil
}

// Normalize normalizes all the field keys in the query
func (q *QueryBuilderQuery[T]) Normalize() {
	// normalize select fields
	for idx := range q.SelectFields {
		q.SelectFields[idx].Normalize()
	}

	// normalize group by fields
	for idx := range q.GroupBy {
		q.GroupBy[idx].Normalize()
	}

	// normalize order by fields
	for idx := range q.Order {
		q.Order[idx].Key.Normalize()
	}

	// normalize secondary aggregations
	for idx := range q.SecondaryAggregations {
		for jdx := range q.SecondaryAggregations[idx].Order {
			q.SecondaryAggregations[idx].Order[jdx].Key.Normalize()
		}
		for jdx := range q.SecondaryAggregations[idx].GroupBy {
			q.SecondaryAggregations[idx].GroupBy[jdx].Normalize()
		}
	}
}
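As a quick usage sketch (assuming the types above; the test file that follows exercises this in full): because UnmarshalJSON now calls Normalize, a plain json.Unmarshal both rejects unknown fields and normalizes field keys in one step.

    var q QueryBuilderQuery[TraceAggregation]
    err := json.Unmarshal([]byte(`{
        "name": "A",
        "signal": "traces",
        "groupBy": [{"name": "resource.host.name"}]
    }`), &q)
    // err == nil; q.GroupBy[0].Name == "host.name" and
    // q.GroupBy[0].FieldContext == telemetrytypes.FieldContextResource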
@@ -0,0 +1,653 @@
package querybuildertypesv5

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestQueryBuilderQuery_Copy(t *testing.T) {
	t.Run("copy with all fields populated", func(t *testing.T) {
		original := QueryBuilderQuery[TraceAggregation]{
			Name:         "A",
			StepInterval: Step{Duration: 60 * time.Second},
			Signal:       telemetrytypes.SignalTraces,
			Source:       telemetrytypes.SourceUnspecified,
			Aggregations: []TraceAggregation{
				{
					Expression: "count()",
					Alias:      "trace_count",
				},
			},
			Disabled: false,
			Filter: &Filter{
				Expression: "service.name = 'frontend'",
			},
			GroupBy: []GroupByKey{
				{
					TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
						Name:         "service.name",
						FieldContext: telemetrytypes.FieldContextResource,
					},
				},
			},
			Order: []OrderBy{
				{
					Key: OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name:         "timestamp",
							FieldContext: telemetrytypes.FieldContextSpan,
						},
					},
					Direction: OrderDirectionDesc,
				},
			},
			SelectFields: []telemetrytypes.TelemetryFieldKey{
				{
					Name:         "trace_id",
					FieldContext: telemetrytypes.FieldContextSpan,
				},
			},
			Limit:  100,
			Offset: 0,
			Cursor: "cursor123",
			LimitBy: &LimitBy{
				Value: "10",
				Keys: []string{
					"service.name",
				},
			},
			Having: &Having{
				Expression: "count() > 100",
			},
			SecondaryAggregations: []SecondaryAggregation{
				{
					Limit: 10,
					Order: []OrderBy{
						{
							Key: OrderByKey{
								TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
									Name:         "value",
									FieldContext: telemetrytypes.FieldContextSpan,
								},
							},
							Direction: OrderDirectionAsc,
						},
					},
					GroupBy: []GroupByKey{
						{
							TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
								Name:         "region",
								FieldContext: telemetrytypes.FieldContextResource,
							},
						},
					},
				},
			},
			Functions: []Function{
				{
					Name: FunctionNameTimeShift,
					Args: []FunctionArg{
						{
							Name:  "shift",
							Value: "1h",
						},
					},
				},
			},
			Legend:  "{{service.name}}",
			ShiftBy: 3600000,
		}

		// Create a copy
		copied := original.Copy()

		// Assert that values are equal
		assert.Equal(t, original.Name, copied.Name)
		assert.Equal(t, original.StepInterval, copied.StepInterval)
		assert.Equal(t, original.Signal, copied.Signal)
		assert.Equal(t, original.Source, copied.Source)
		assert.Equal(t, original.Disabled, copied.Disabled)
		assert.Equal(t, original.Limit, copied.Limit)
		assert.Equal(t, original.Offset, copied.Offset)
		assert.Equal(t, original.Cursor, copied.Cursor)
		assert.Equal(t, original.Legend, copied.Legend)
		assert.Equal(t, original.ShiftBy, copied.ShiftBy)

		// Assert deep copies for slices and pointers
		require.NotNil(t, copied.Aggregations)
		assert.Equal(t, len(original.Aggregations), len(copied.Aggregations))
		assert.Equal(t, original.Aggregations[0].Expression, copied.Aggregations[0].Expression)

		require.NotNil(t, copied.Filter)
		assert.Equal(t, original.Filter.Expression, copied.Filter.Expression)

		require.NotNil(t, copied.GroupBy)
		assert.Equal(t, len(original.GroupBy), len(copied.GroupBy))
		assert.Equal(t, original.GroupBy[0].Name, copied.GroupBy[0].Name)

		require.NotNil(t, copied.Order)
		assert.Equal(t, len(original.Order), len(copied.Order))
		assert.Equal(t, original.Order[0].Key.Name, copied.Order[0].Key.Name)

		require.NotNil(t, copied.SelectFields)
		assert.Equal(t, len(original.SelectFields), len(copied.SelectFields))
		assert.Equal(t, original.SelectFields[0].Name, copied.SelectFields[0].Name)

		require.NotNil(t, copied.LimitBy)
		assert.Equal(t, original.LimitBy.Value, copied.LimitBy.Value)
		assert.Equal(t, len(original.LimitBy.Keys), len(copied.LimitBy.Keys))

		require.NotNil(t, copied.Having)
		assert.Equal(t, original.Having.Expression, copied.Having.Expression)

		require.NotNil(t, copied.SecondaryAggregations)
		assert.Equal(t, len(original.SecondaryAggregations), len(copied.SecondaryAggregations))
		assert.Equal(t, original.SecondaryAggregations[0].Limit, copied.SecondaryAggregations[0].Limit)

		require.NotNil(t, copied.Functions)
		assert.Equal(t, len(original.Functions), len(copied.Functions))
		assert.Equal(t, original.Functions[0].Name, copied.Functions[0].Name)

		// Verify independence - modify copied and ensure original is unchanged
		copied.Name = "B"
		assert.Equal(t, "A", original.Name)

		copied.Aggregations[0].Expression = "sum()"
		assert.Equal(t, "count()", original.Aggregations[0].Expression)

		copied.Filter.Expression = "modified"
		assert.Equal(t, "service.name = 'frontend'", original.Filter.Expression)

		copied.GroupBy[0].Name = "modified"
		assert.Equal(t, "service.name", original.GroupBy[0].Name)

		copied.Order[0].Key.Name = "modified"
		assert.Equal(t, "timestamp", original.Order[0].Key.Name)

		copied.SelectFields[0].Name = "modified"
		assert.Equal(t, "trace_id", original.SelectFields[0].Name)

		copied.LimitBy.Value = "999"
		assert.Equal(t, "10", original.LimitBy.Value)

		copied.Having.Expression = "modified"
		assert.Equal(t, "count() > 100", original.Having.Expression)

		copied.SecondaryAggregations[0].Limit = 999
		assert.Equal(t, 10, original.SecondaryAggregations[0].Limit)

		copied.Functions[0].Name = FunctionNameAbsolute
		assert.Equal(t, FunctionNameTimeShift, original.Functions[0].Name)
	})

	t.Run("copy with nil fields", func(t *testing.T) {
		original := QueryBuilderQuery[TraceAggregation]{
			Name:   "A",
			Signal: telemetrytypes.SignalTraces,
		}

		copied := original.Copy()

		assert.Equal(t, original.Name, copied.Name)
		assert.Equal(t, original.Signal, copied.Signal)
		assert.Nil(t, copied.Aggregations)
		assert.Nil(t, copied.Filter)
		assert.Nil(t, copied.GroupBy)
		assert.Nil(t, copied.Order)
		assert.Nil(t, copied.SelectFields)
		assert.Nil(t, copied.LimitBy)
		assert.Nil(t, copied.Having)
		assert.Nil(t, copied.SecondaryAggregations)
		assert.Nil(t, copied.Functions)
	})

	t.Run("copy metric aggregation query", func(t *testing.T) {
		original := QueryBuilderQuery[MetricAggregation]{
			Name:   "M",
			Signal: telemetrytypes.SignalMetrics,
			Aggregations: []MetricAggregation{
				{
					MetricName:       "cpu_usage",
					SpaceAggregation: metrictypes.SpaceAggregationAvg,
					TimeAggregation:  metrictypes.TimeAggregationAvg,
				},
			},
		}

		copied := original.Copy()

		assert.Equal(t, original.Name, copied.Name)
		assert.Equal(t, original.Signal, copied.Signal)
		require.NotNil(t, copied.Aggregations)
		assert.Equal(t, original.Aggregations[0].MetricName, copied.Aggregations[0].MetricName)
		assert.Equal(t, original.Aggregations[0].SpaceAggregation, copied.Aggregations[0].SpaceAggregation)

		// Verify independence
		copied.Aggregations[0].MetricName = "modified"
		assert.Equal(t, "cpu_usage", original.Aggregations[0].MetricName)
	})

	t.Run("copy log aggregation query", func(t *testing.T) {
		original := QueryBuilderQuery[LogAggregation]{
			Name:   "L",
			Signal: telemetrytypes.SignalLogs,
			Aggregations: []LogAggregation{
				{
					Expression: "count()",
					Alias:      "log_count",
				},
			},
		}

		copied := original.Copy()

		assert.Equal(t, original.Name, copied.Name)
		assert.Equal(t, original.Signal, copied.Signal)
		require.NotNil(t, copied.Aggregations)
		assert.Equal(t, original.Aggregations[0].Expression, copied.Aggregations[0].Expression)

		// Verify independence
		copied.Aggregations[0].Expression = "sum()"
		assert.Equal(t, "count()", original.Aggregations[0].Expression)
	})
}

func TestQueryBuilderQuery_Normalize(t *testing.T) {
	t.Run("normalize select fields", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			SelectFields: []telemetrytypes.TelemetryFieldKey{
				{
					Name:         "service.name",
					FieldContext: telemetrytypes.FieldContextResource,
				},
				{
					Name:         "span.name",
					FieldContext: telemetrytypes.FieldContextSpan,
				},
			},
		}

		query.Normalize()

		// Keys already carry a FieldContext, so Normalize leaves them unchanged
		assert.Equal(t, "service.name", query.SelectFields[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.SelectFields[0].FieldContext)
		assert.Equal(t, "span.name", query.SelectFields[1].Name)
		assert.Equal(t, telemetrytypes.FieldContextSpan, query.SelectFields[1].FieldContext)
	})

	t.Run("normalize group by fields", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			GroupBy: []GroupByKey{
				{
					TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
						Name:         "service.name",
						FieldContext: telemetrytypes.FieldContextResource,
					},
				},
			},
		}

		query.Normalize()

		assert.Equal(t, "service.name", query.GroupBy[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.GroupBy[0].FieldContext)
	})

	t.Run("normalize order by fields", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			Order: []OrderBy{
				{
					Key: OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name:         "timestamp",
							FieldContext: telemetrytypes.FieldContextSpan,
						},
					},
					Direction: OrderDirectionDesc,
				},
			},
		}

		query.Normalize()

		assert.Equal(t, "timestamp", query.Order[0].Key.Name)
		assert.Equal(t, telemetrytypes.FieldContextSpan, query.Order[0].Key.FieldContext)
	})

	t.Run("normalize secondary aggregations", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			SecondaryAggregations: []SecondaryAggregation{
				{
					Order: []OrderBy{
						{
							Key: OrderByKey{
								TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
									Name:         "value",
									FieldContext: telemetrytypes.FieldContextSpan,
								},
							},
							Direction: OrderDirectionAsc,
						},
					},
					GroupBy: []GroupByKey{
						{
							TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
								Name:         "region",
								FieldContext: telemetrytypes.FieldContextResource,
							},
						},
					},
				},
			},
		}

		query.Normalize()

		assert.Equal(t, "value", query.SecondaryAggregations[0].Order[0].Key.Name)
		assert.Equal(t, telemetrytypes.FieldContextSpan, query.SecondaryAggregations[0].Order[0].Key.FieldContext)
		assert.Equal(t, "region", query.SecondaryAggregations[0].GroupBy[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.SecondaryAggregations[0].GroupBy[0].FieldContext)
	})

	t.Run("normalize with nil fields", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			Name:   "A",
			Signal: telemetrytypes.SignalTraces,
		}

		// Should not panic
		query.Normalize()

		assert.Equal(t, "A", query.Name)
	})

	t.Run("normalize all fields together", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			SelectFields: []telemetrytypes.TelemetryFieldKey{
				{Name: "service.name", FieldContext: telemetrytypes.FieldContextResource},
			},
			GroupBy: []GroupByKey{
				{
					TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
						Name:         "host.name",
						FieldContext: telemetrytypes.FieldContextResource,
					},
				},
			},
			Order: []OrderBy{
				{
					Key: OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name:         "duration",
							FieldContext: telemetrytypes.FieldContextSpan,
						},
					},
				},
			},
			SecondaryAggregations: []SecondaryAggregation{
				{
					Order: []OrderBy{
						{
							Key: OrderByKey{
								TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
									Name:         "count",
									FieldContext: telemetrytypes.FieldContextSpan,
								},
							},
						},
					},
					GroupBy: []GroupByKey{
						{
							TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
								Name:         "status.code",
								FieldContext: telemetrytypes.FieldContextSpan,
							},
						},
					},
				},
			},
		}

		query.Normalize()

		assert.Equal(t, "service.name", query.SelectFields[0].Name)
		assert.Equal(t, "host.name", query.GroupBy[0].Name)
		assert.Equal(t, "duration", query.Order[0].Key.Name)
		assert.Equal(t, "count", query.SecondaryAggregations[0].Order[0].Key.Name)
		assert.Equal(t, "status.code", query.SecondaryAggregations[0].GroupBy[0].Name)
	})
}

func TestQueryBuilderQuery_UnmarshalJSON(t *testing.T) {
	t.Run("valid trace query", func(t *testing.T) {
		jsonData := `{
			"name": "A",
			"signal": "traces",
			"stepInterval": 60,
			"aggregations": [{
				"expression": "count()",
				"alias": "trace_count"
			}],
			"filter": {
				"expression": "service.name = 'frontend'"
			},
			"groupBy": [{
				"name": "service.name",
				"fieldContext": "resource"
			}],
			"order": [{
				"key": {
					"name": "service.name",
					"fieldContext": "resource"
				},
				"direction": "desc"
			}],
			"limit": 100
		}`

		var query QueryBuilderQuery[TraceAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		require.NoError(t, err)

		assert.Equal(t, "A", query.Name)
		assert.Equal(t, telemetrytypes.SignalTraces, query.Signal)
		assert.Equal(t, int64(60000), query.StepInterval.Milliseconds())
		assert.Equal(t, 1, len(query.Aggregations))
		assert.Equal(t, "count()", query.Aggregations[0].Expression)
		assert.Equal(t, "trace_count", query.Aggregations[0].Alias)
		require.NotNil(t, query.Filter)
		assert.Equal(t, "service.name = 'frontend'", query.Filter.Expression)
		assert.Equal(t, 1, len(query.GroupBy))
		assert.Equal(t, "service.name", query.GroupBy[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.GroupBy[0].FieldContext)
		assert.Equal(t, 1, len(query.Order))
		assert.Equal(t, "service.name", query.Order[0].Key.Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.Order[0].Key.FieldContext)
		assert.Equal(t, OrderDirectionDesc, query.Order[0].Direction)
		assert.Equal(t, 100, query.Limit)
	})

	t.Run("valid metric query", func(t *testing.T) {
		jsonData := `{
			"name": "M",
			"signal": "metrics",
			"stepInterval": "1m",
			"aggregations": [{
				"metricName": "cpu_usage",
				"spaceAggregation": "avg",
				"timeAggregation": "avg"
			}]
		}`

		var query QueryBuilderQuery[MetricAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		require.NoError(t, err)

		assert.Equal(t, "M", query.Name)
		assert.Equal(t, telemetrytypes.SignalMetrics, query.Signal)
		assert.Equal(t, int64(60000), query.StepInterval.Milliseconds())
		assert.Equal(t, 1, len(query.Aggregations))
		assert.Equal(t, "cpu_usage", query.Aggregations[0].MetricName)
	})

	t.Run("valid log query", func(t *testing.T) {
		jsonData := `{
			"name": "L",
			"signal": "logs",
			"aggregations": [{
				"expression": "count()",
				"alias": "log_count"
			}]
		}`

		var query QueryBuilderQuery[LogAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		require.NoError(t, err)

		assert.Equal(t, "L", query.Name)
		assert.Equal(t, telemetrytypes.SignalLogs, query.Signal)
		assert.Equal(t, 1, len(query.Aggregations))
		assert.Equal(t, "count()", query.Aggregations[0].Expression)
	})

	t.Run("unknown field error", func(t *testing.T) {
		jsonData := `{
			"name": "A",
			"signal": "traces",
			"unknownField": "value"
		}`

		var query QueryBuilderQuery[TraceAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "unknown field")
	})

	t.Run("query with all optional fields", func(t *testing.T) {
		// NOTE: This json payload is not realistic, just for testing all fields
		jsonData := `{
			"name": "A",
			"signal": "traces",
			"stepInterval": "5m",
			"source": "traces",
			"aggregations": [{
				"expression": "count()",
				"alias": "span.count"
			}],
			"disabled": true,
			"filter": {
				"expression": "service.name = 'api'"
			},
			"groupBy": [{
				"name": "service.name",
				"fieldContext": "resource"
			}],
			"order": [{
				"key": {
					"name": "timestamp",
					"fieldContext": "span"
				},
				"direction": "asc"
			}],
			"selectFields": [{
				"name": "trace_id",
				"fieldContext": "span"
			}],
			"limit": 50,
			"offset": 10,
			"cursor": "cursor123",
			"limitBy": {
				"value": "5",
				"keys": ["service.name"]
			},
			"having": {
				"expression": "count() > 10"
			},
			"secondaryAggregations": [{
				"limit": 20,
				"order": [{
					"key": {
						"name": "value",
						"fieldContext": "span"
					},
					"direction": "desc"
				}],
				"groupBy": [{
					"name": "region",
					"fieldContext": "resource"
				}]
			}],
			"functions": [{
				"name": "timeShift",
				"args": [{
					"name": "shift",
					"value": "1h"
				}]
			}],
			"legend": "{{service.name}}"
		}`

		var query QueryBuilderQuery[TraceAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		require.NoError(t, err)

		assert.Equal(t, "A", query.Name)
		assert.Equal(t, telemetrytypes.SignalTraces, query.Signal)
		assert.Equal(t, int64(300000), query.StepInterval.Milliseconds())
		// Source is set in the JSON, so it should be "traces", not SourceUnspecified
		assert.Equal(t, "traces", query.Source.String.StringValue())
		assert.True(t, query.Disabled)
		assert.Equal(t, query.Aggregations[0].Expression, "count()")
		assert.Equal(t, query.Aggregations[0].Alias, "span.count")
		assert.NotNil(t, query.Filter)
		assert.NotNil(t, query.GroupBy)
		assert.NotNil(t, query.Order)
		assert.NotNil(t, query.SelectFields)
		assert.Equal(t, 50, query.Limit)
		assert.Equal(t, 10, query.Offset)
		assert.Equal(t, "cursor123", query.Cursor)
		assert.NotNil(t, query.LimitBy)
		assert.NotNil(t, query.Having)
		assert.NotNil(t, query.SecondaryAggregations)
		assert.NotNil(t, query.Functions)
		assert.Equal(t, "{{service.name}}", query.Legend)
	})

	t.Run("normalization happens during unmarshaling", func(t *testing.T) {
		jsonData := `{
			"name": "A",
			"signal": "traces",
			"selectFields": [{
				"name": "resource.service.name"
			}],
			"groupBy": [{
				"name": "resource.host.name"
			}],
			"order": [{
				"key": {
					"name": "span.duration"
				},
				"direction": "desc"
			}]
		}`

		var query QueryBuilderQuery[TraceAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		require.NoError(t, err)

		// FieldContext should be extracted from the name, with the context prefix stripped
		assert.Equal(t, "service.name", query.SelectFields[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.SelectFields[0].FieldContext)
		assert.Equal(t, "host.name", query.GroupBy[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.GroupBy[0].FieldContext)
		assert.Equal(t, "duration", query.Order[0].Key.Name)
		assert.Equal(t, telemetrytypes.FieldContextSpan, query.Order[0].Key.FieldContext)
	})
}
@@ -47,19 +47,19 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
	switch header.Signal {
	case telemetrytypes.SignalTraces:
		var spec QueryBuilderQuery[TraceAggregation]
-		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
+		if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
			return wrapUnmarshalError(err, "invalid trace builder query spec: %v", err)
		}
		q.Spec = spec
	case telemetrytypes.SignalLogs:
		var spec QueryBuilderQuery[LogAggregation]
-		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
+		if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
			return wrapUnmarshalError(err, "invalid log builder query spec: %v", err)
		}
		q.Spec = spec
	case telemetrytypes.SignalMetrics:
		var spec QueryBuilderQuery[MetricAggregation]
-		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
+		if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
			return wrapUnmarshalError(err, "invalid metric builder query spec: %v", err)
		}
		q.Spec = spec
@@ -75,6 +75,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
	case QueryTypeFormula:
		var spec QueryBuilderFormula
		// TODO: use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderFormula
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "formula spec"); err != nil {
			return wrapUnmarshalError(err, "invalid formula spec: %v", err)
		}
@@ -82,6 +83,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
	case QueryTypeJoin:
		var spec QueryBuilderJoin
		// TODO: use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderJoin
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "join spec"); err != nil {
			return wrapUnmarshalError(err, "invalid join spec: %v", err)
		}
@@ -89,13 +91,14 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
	case QueryTypeTraceOperator:
		var spec QueryBuilderTraceOperator
-		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "trace operator spec"); err != nil {
+		if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
			return wrapUnmarshalError(err, "invalid trace operator spec: %v", err)
		}
		q.Spec = spec

	case QueryTypePromQL:
		var spec PromQuery
		// TODO: use json.Unmarshal here after implementing custom unmarshaler for PromQuery
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "PromQL spec"); err != nil {
			return wrapUnmarshalError(err, "invalid PromQL spec: %v", err)
		}
@@ -103,6 +106,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
	case QueryTypeClickHouseSQL:
		var spec ClickHouseQuery
		// TODO: use json.Unmarshal here after implementing custom unmarshaler for ClickHouseQuery
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "ClickHouse SQL spec"); err != nil {
			return wrapUnmarshalError(err, "invalid ClickHouse SQL spec: %v", err)
		}
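The builder-query cases above can switch to plain json.Unmarshal because the spec types now implement json.Unmarshaler themselves, so unknown-field checks and normalization still run. A hedged sketch of the resulting call path (QueryEnvelope and the spec types are defined in this package):

    var env QueryEnvelope
    payload := []byte(`{"type": "builder_query", "spec": {"name": "A", "signal": "traces"}}`)
    // json.Unmarshal dispatches to QueryEnvelope.UnmarshalJSON, which routes the
    // spec to QueryBuilderQuery[TraceAggregation].UnmarshalJSON, which in turn
    // validates unknown fields and calls Normalize on every field key.
    err := json.Unmarshal(payload, &env)
    _ = err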
@@ -366,6 +366,45 @@ func (q QueryBuilderTraceOperator) Copy() QueryBuilderTraceOperator {
	return c
}

// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
func (q *QueryBuilderTraceOperator) UnmarshalJSON(data []byte) error {
	// Define a type alias to avoid infinite recursion
	type Alias QueryBuilderTraceOperator

	var temp Alias
	// Use UnmarshalJSONWithContext for better error messages
	if err := UnmarshalJSONWithContext(data, &temp, "query spec"); err != nil {
		return err
	}

	// Copy the decoded values back to the original struct
	*q = QueryBuilderTraceOperator(temp)

	// Normalize the query after unmarshaling
	q.Normalize()
	return nil
}

// Normalize normalizes all the field keys in the query
func (q *QueryBuilderTraceOperator) Normalize() {
	// normalize select fields
	for idx := range q.SelectFields {
		q.SelectFields[idx].Normalize()
	}

	// normalize group by fields
	for idx := range q.GroupBy {
		q.GroupBy[idx].Normalize()
	}

	// normalize order by fields
	for idx := range q.Order {
		q.Order[idx].Key.Normalize()
	}
}

// ValidateUniqueTraceOperator ensures only one trace operator exists in queries
func ValidateUniqueTraceOperator(queries []QueryEnvelope) error {
	traceOperatorCount := 0
@@ -51,63 +51,118 @@ func (f TelemetryFieldKey) String() string {
	return sb.String()
}

-// GetFieldKeyFromKeyText returns a TelemetryFieldKey from a key text.
-// The key text is expected to be in the format of `fieldContext.fieldName:fieldDataType` in the search query.
-func GetFieldKeyFromKeyText(key string) TelemetryFieldKey {
-	keyTextParts := strings.Split(key, ".")
-
-	var explicitFieldContextProvided, explicitFieldDataTypeProvided bool
-	var explicitFieldContext FieldContext
-	var explicitFieldDataType FieldDataType
-	var ok bool
-
-	if len(keyTextParts) > 1 {
-		explicitFieldContext, ok = fieldContexts[keyTextParts[0]]
-		if ok && explicitFieldContext != FieldContextUnspecified {
-			explicitFieldContextProvided = true
-		}
-	}
-
-	if explicitFieldContextProvided {
-		keyTextParts = keyTextParts[1:]
-	}
-
-	// check if there is a field data type provided
-	if len(keyTextParts) >= 1 {
-		lastPart := keyTextParts[len(keyTextParts)-1]
-		lastPartParts := strings.Split(lastPart, ":")
-		if len(lastPartParts) > 1 {
-			explicitFieldDataType, ok = fieldDataTypes[lastPartParts[1]]
-			if ok && explicitFieldDataType != FieldDataTypeUnspecified {
-				explicitFieldDataTypeProvided = true
-			}
-		}
-	}
-
-	if explicitFieldDataTypeProvided {
-		keyTextParts[len(keyTextParts)-1] = lastPartParts[0]
-	}
-
-	realKey := strings.Join(keyTextParts, ".")
-
-	fieldKeySelector := TelemetryFieldKey{
-		Name: realKey,
-	}
-
-	if explicitFieldContextProvided {
-		fieldKeySelector.FieldContext = explicitFieldContext
-	} else {
-		fieldKeySelector.FieldContext = FieldContextUnspecified
-	}
-
-	if explicitFieldDataTypeProvided {
-		fieldKeySelector.FieldDataType = explicitFieldDataType
-	} else {
-		fieldKeySelector.FieldDataType = FieldDataTypeUnspecified
-	}
-
-	return fieldKeySelector
-}
+func (f TelemetryFieldKey) Text() string {
+	return TelemetryFieldKeyToText(&f)
+}
+
+// Normalize parses and normalizes a TelemetryFieldKey by extracting
+// the field context and data type from the field name if they are not already specified.
+// This function modifies the key in place.
+//
+// Example:
+//
+//	key := &TelemetryFieldKey{Name: "resource.service.name:string"}
+//	key.Normalize()
+//	// Result: Name: "service.name", FieldContext: FieldContextResource, FieldDataType: FieldDataTypeString
+func (f *TelemetryFieldKey) Normalize() {
+	// Step 1: Parse data type from the right (after the last ":") if not already specified
+	if f.FieldDataType == FieldDataTypeUnspecified {
+		if colonIdx := strings.LastIndex(f.Name, ":"); colonIdx != -1 {
+			potentialDataType := f.Name[colonIdx+1:]
+			if dt, ok := fieldDataTypes[potentialDataType]; ok && dt != FieldDataTypeUnspecified {
+				f.FieldDataType = dt
+				f.Name = f.Name[:colonIdx]
+			}
+		}
+	}
+
+	// Step 2: Parse field context from the left if not already specified
+	if f.FieldContext == FieldContextUnspecified {
+		if dotIdx := strings.Index(f.Name, "."); dotIdx != -1 {
+			potentialContext := f.Name[:dotIdx]
+			if fc, ok := fieldContexts[potentialContext]; ok && fc != FieldContextUnspecified {
+				f.Name = f.Name[dotIdx+1:]
+				f.FieldContext = fc
+
+				// Step 2a: Handle special case for log.body.* fields
+				if f.FieldContext == FieldContextLog && strings.HasPrefix(f.Name, BodyJSONStringSearchPrefix) {
+					f.FieldContext = FieldContextBody
+					f.Name = strings.TrimPrefix(f.Name, BodyJSONStringSearchPrefix)
+				}
+			}
+		}
+	}
+}
+
+// GetFieldKeyFromKeyText returns a TelemetryFieldKey from a key text.
+// The key text is expected to be in the format of `fieldContext.fieldName:fieldDataType` in the search query.
+// Both fieldContext and :fieldDataType are optional.
+// fieldName can contain dots and can start with a dot (e.g., ".http_code").
+// Special cases:
+// - When key exactly matches a field context name (e.g., "body", "attribute"), use unspecified context
+// - When key starts with "body." prefix, use "body" as context with remainder as field name
+func GetFieldKeyFromKeyText(key string) TelemetryFieldKey {
+	var explicitFieldDataType FieldDataType = FieldDataTypeUnspecified
+	var fieldName string
+
+	// Step 1: Parse data type from the right (after the last ":")
+	var keyWithoutDataType string
+	if colonIdx := strings.LastIndex(key, ":"); colonIdx != -1 {
+		potentialDataType := key[colonIdx+1:]
+		if dt, ok := fieldDataTypes[potentialDataType]; ok && dt != FieldDataTypeUnspecified {
+			explicitFieldDataType = dt
+			keyWithoutDataType = key[:colonIdx]
+		} else {
+			// No valid data type found, treat the entire key as the field name
+			keyWithoutDataType = key
+		}
+	} else {
+		keyWithoutDataType = key
+	}
+
+	// Step 2: Parse field context from the left
+	if dotIdx := strings.Index(keyWithoutDataType, "."); dotIdx != -1 {
+		potentialContext := keyWithoutDataType[:dotIdx]
+		if fc, ok := fieldContexts[potentialContext]; ok && fc != FieldContextUnspecified {
+			fieldName = keyWithoutDataType[dotIdx+1:]
+
+			// Step 2a: Handle special case for log.body.* fields
+			if fc == FieldContextLog && strings.HasPrefix(fieldName, BodyJSONStringSearchPrefix) {
+				fc = FieldContextBody
+				fieldName = strings.TrimPrefix(fieldName, BodyJSONStringSearchPrefix)
+			}
+
+			return TelemetryFieldKey{
+				Name:          fieldName,
+				FieldContext:  fc,
+				FieldDataType: explicitFieldDataType,
+			}
+		}
+	}
+
+	// Step 3: No context found, entire key is the field name
+	return TelemetryFieldKey{
+		Name:          keyWithoutDataType,
+		FieldContext:  FieldContextUnspecified,
+		FieldDataType: explicitFieldDataType,
+	}
+}
+
+func TelemetryFieldKeyToText(key *TelemetryFieldKey) string {
+	var sb strings.Builder
+	if key.FieldContext != FieldContextUnspecified {
+		sb.WriteString(key.FieldContext.StringValue())
+		sb.WriteString(".")
+	}
+	sb.WriteString(key.Name)
+	if key.FieldDataType != FieldDataTypeUnspecified {
+		sb.WriteString(":")
+		sb.WriteString(key.FieldDataType.StringValue())
+	}
+	return sb.String()
+}

func FieldKeyToMaterializedColumnName(key *TelemetryFieldKey) string {
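A few illustrative calls, sketched from the test cases that follow (not additional API):

    k := GetFieldKeyFromKeyText("body.http.status.code:int")
    // k.Name == "http.status.code", k.FieldContext == FieldContextBody,
    // k.FieldDataType == FieldDataTypeNumber

    k = GetFieldKeyFromKeyText(".http_code")
    // A leading dot matches no context, so the whole key stays as the name:
    // k.Name == ".http_code", k.FieldContext == FieldContextUnspecified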
@@ -83,12 +83,314 @@ func TestGetFieldKeyFromKeyText(t *testing.T) {
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		// Test case for log.body.status - should use body context with status as field name
		{
			keyText: "log.body.status",
			expected: TelemetryFieldKey{
				Name:          "status",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		// Test case for log.body. with data type
		{
			keyText: "log.body.status_code:int",
			expected: TelemetryFieldKey{
				Name:          "status_code",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		// Test case for log.body. with nested field name
		{
			keyText: "log.body.http.status.code",
			expected: TelemetryFieldKey{
				Name:          "http.status.code",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		// Test case for body. prefix - should use body as context
		{
			keyText: "body.http.status.code:int",
			expected: TelemetryFieldKey{
				Name:          "http.status.code",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		// Test case for body. prefix without data type - should use body as context
		{
			keyText: "body.status",
			expected: TelemetryFieldKey{
				Name:          "status",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		// Test case for body. prefix with array data type - should use body as context
		{
			keyText: "body.tags:[]string",
			expected: TelemetryFieldKey{
				Name:          "tags",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeArrayString,
			},
		},
		// Test case for body. prefix with array data type (int64)
		{
			keyText: "body.ids:[]int64",
			expected: TelemetryFieldKey{
				Name:          "ids",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeArrayInt64,
			},
		},
		// Test case for body. prefix with nested field and array data type
		{
			keyText: "body.http.headers:[]string",
			expected: TelemetryFieldKey{
				Name:          "http.headers",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeArrayString,
			},
		},
		// Test case for just "body" - should use unspecified context with body as field name
		{
			keyText: "body",
			expected: TelemetryFieldKey{
				Name:          "body",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		// Test case for just "body" with data type
		{
			keyText: "body:string",
			expected: TelemetryFieldKey{
				Name:          "body",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeString,
			},
		},
		// Test case for log.body (without trailing dot) - should keep log context
		{
			keyText: "log.body",
			expected: TelemetryFieldKey{
				Name:          "body",
				FieldContext:  FieldContextLog,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		// Test case for log.body with data type - should keep log context
		{
			keyText: "log.body:string",
			expected: TelemetryFieldKey{
				Name:          "body",
				FieldContext:  FieldContextLog,
				FieldDataType: FieldDataTypeString,
			},
		},
		// Test case for field name with dots and no context
		{
			keyText: "http.status.code",
			expected: TelemetryFieldKey{
				Name:          "http.status.code",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		// Test case for field name with dots and data type but no context
		{
			keyText: "http.status.code:int",
			expected: TelemetryFieldKey{
				Name:          "http.status.code",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		// Test case for just field name
		{
			keyText: "fieldName",
			expected: TelemetryFieldKey{
				Name:          "fieldName",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		// Test case for just field name with data type
		{
			keyText: "fieldName:string",
			expected: TelemetryFieldKey{
				Name:          "fieldName",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeString,
			},
		},
		// Test case for field starting with dot
		{
			keyText: ".http_code",
			expected: TelemetryFieldKey{
				Name:          ".http_code",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		// Test case for field starting with dot and with data type
		{
			keyText: ".http_code:int",
			expected: TelemetryFieldKey{
				Name:          ".http_code",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		// Test case for field starting with dot and nested field name
		{
			keyText: ".http.status.code:int",
			expected: TelemetryFieldKey{
				Name:          ".http.status.code",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeNumber,
			},
		},
	}

	for _, testCase := range testCases {
		result := GetFieldKeyFromKeyText(testCase.keyText)
		if !reflect.DeepEqual(result, testCase.expected) {
-			t.Errorf("expected %v, got %v", testCase.expected, result)
+			t.Errorf("For key '%s': expected %v, got %v", testCase.keyText, testCase.expected, result)
		}
	}
}

func TestNormalize(t *testing.T) {
	testCases := []struct {
		name     string
		input    TelemetryFieldKey
		expected TelemetryFieldKey
	}{
		{
			name: "Normalize key with context and data type in name",
			input: TelemetryFieldKey{
				Name: "resource.service.name:string",
			},
			expected: TelemetryFieldKey{
				Name:          "service.name",
				FieldContext:  FieldContextResource,
				FieldDataType: FieldDataTypeString,
			},
		},
		{
			name: "Normalize key with existing context and data type",
			input: TelemetryFieldKey{
				Name:          "service.name",
				FieldContext:  FieldContextResource,
				FieldDataType: FieldDataTypeString,
			},
			expected: TelemetryFieldKey{
				Name:          "service.name",
				FieldContext:  FieldContextResource,
				FieldDataType: FieldDataTypeString,
			},
		},
		{
			name: "Normalize body field",
			input: TelemetryFieldKey{
				Name: "body.status:int",
			},
			expected: TelemetryFieldKey{
				Name:          "status",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		{
			name: "Normalize log.body.* field",
			input: TelemetryFieldKey{
				Name: "log.body.status",
			},
			expected: TelemetryFieldKey{
				Name:          "status",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		{
			name: "Normalize field with no context",
			input: TelemetryFieldKey{
				Name: "http.status.code:int",
			},
			expected: TelemetryFieldKey{
				Name:          "http.status.code",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		{
			name: "Normalize exact body keyword",
			input: TelemetryFieldKey{
				Name: "body",
			},
			expected: TelemetryFieldKey{
				Name:          "body",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		{
			name: "Normalize span field",
			input: TelemetryFieldKey{
				Name: "span.kind:string",
			},
			expected: TelemetryFieldKey{
				Name:          "kind",
				FieldContext:  FieldContextSpan,
				FieldDataType: FieldDataTypeString,
			},
		},
		{
			name: "Normalize attribute field",
			input: TelemetryFieldKey{
				Name: "attribute.http.method",
			},
			expected: TelemetryFieldKey{
				Name:          "http.method",
				FieldContext:  FieldContextAttribute,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		{
			name: "Normalize field starting with dot",
			input: TelemetryFieldKey{
				Name: ".http_code:int",
			},
			expected: TelemetryFieldKey{
				Name:          ".http_code",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		{
			name: "Normalize array data type field",
			input: TelemetryFieldKey{
				Name: "body.tags:[]string",
			},
			expected: TelemetryFieldKey{
				Name:          "tags",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeArrayString,
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			key := tc.input
			key.Normalize()
			if !reflect.DeepEqual(key, tc.expected) {
				t.Errorf("Expected %v, got %v", tc.expected, key)
			}
		})
	}
}
@@ -377,7 +377,7 @@ def idp_login(driver: webdriver.Chrome) -> Callable[[str, str], None]:
        # Fill the email in username field
        username_field = wait.until(EC.element_to_be_clickable((By.ID, "username")))
        username_field.send_keys(email)

        # Fill the password in password field
        password_field = wait.until(EC.element_to_be_clickable((By.ID, "password")))
        password_field.send_keys(password)
@@ -66,7 +66,7 @@ def signoz(  # pylint: disable=too-many-arguments,too-many-positional-arguments
            "SIGNOZ_PROMETHEUS_ACTIVE__QUERY__TRACKER_ENABLED": False,
            "SIGNOZ_GATEWAY_URL": gateway.container_configs["8080"].base(),
            "SIGNOZ_TOKENIZER_JWT_SECRET": "secret",
-           "SIGNOZ_GLOBAL_INGESTION__URL": "https://ingest.test.signoz.cloud"
+           "SIGNOZ_GLOBAL_INGESTION__URL": "https://ingest.test.signoz.cloud",
        }
        | sqlstore.env
        | clickhouse.env
@@ -2,6 +2,7 @@ from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List

import pytest
import requests

from fixtures import types
@@ -11,7 +12,9 @@ from fixtures.querier import (
    assert_minutely_bucket_values,
    find_named_result,
    index_series_by_label,
    make_query_request,
)
from src.querier.util import assert_identical_query_response


def test_logs_list(
@@ -396,6 +399,122 @@
    assert "d-001" in values


@pytest.mark.parametrize(
    "order_by_context,expected_order",
    ####
    # Tests:
    # 1. Query logs ordered by attribute.service.name descending
    # 2. Query logs ordered by resource.service.name descending
    # 3. Query logs ordered by service.name descending
    ###
    [
        pytest.param("attribute", ["log-002", "log-001", "log-004", "log-003"]),
        pytest.param("resource", ["log-003", "log-004", "log-001", "log-002"]),
        pytest.param("", ["log-002", "log-001", "log-003", "log-004"]),
    ],
)
def test_logs_list_with_order_by(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_logs: Callable[[List[Logs]], None],
    order_by_context: str,
    expected_order: List[str],
) -> None:
    """
    Setup:
        Insert 4 logs with service.name in attributes and resources
    """

    attribute_resource_pair = [
        [{"id": "log-001", "service.name": "c"}, {}],
        [{"id": "log-002", "service.name": "d"}, {}],
        [{"id": "log-003"}, {"service.name": "b"}],
        [{"id": "log-004"}, {"service.name": "a"}],
    ]
    insert_logs(
        [
            Logs(
                timestamp=datetime.now(tz=timezone.utc) - timedelta(seconds=3),
                attributes=attribute_resource_pair[i][0],
                resources=attribute_resource_pair[i][1],
                body="Log with DEBUG severity",
                severity_text="DEBUG",
            )
            for i in range(4)
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    query = {
        "type": "builder_query",
        "spec": {
            "name": "A",
            "signal": "logs",
            "order": [
                {
                    "key": {
                        "name": "service.name",
                        "fieldContext": order_by_context,
                    },
                    "direction": "desc",
                }
            ],
        },
    }

    query_with_inline_context = {
        "type": "builder_query",
        "spec": {
            "name": "A",
            "signal": "logs",
            "order": [
                {
                    "key": {
                        "name": f"{order_by_context + '.' if order_by_context else ''}service.name",
                    },
                    "direction": "desc",
                }
            ],
        },
    }

    response = make_query_request(
        signoz,
        token,
        start_ms=int(
            (datetime.now(tz=timezone.utc) - timedelta(minutes=1)).timestamp() * 1000
        ),
        end_ms=int(datetime.now(tz=timezone.utc).timestamp() * 1000),
        request_type="raw",
        queries=[query],
    )

    # Verify that specifying the context inline in the key name returns
    # the same results as specifying it via fieldContext
    response_with_inline_context = make_query_request(
        signoz,
        token,
        start_ms=int(
            (datetime.now(tz=timezone.utc) - timedelta(minutes=1)).timestamp() * 1000
        ),
        end_ms=int(datetime.now(tz=timezone.utc).timestamp() * 1000),
        request_type="raw",
        queries=[query_with_inline_context],
    )

    assert_identical_query_response(response, response_with_inline_context)

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"

    results = response.json()["data"]["data"]["results"]
    rows = results[0]["rows"]
    ids = [row["data"]["attributes_string"].get("id", "") for row in rows]

    assert ids == expected_order


def test_logs_time_series_count(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
@@ -825,6 +944,44 @@ def test_logs_time_series_count(
        },
    )

    response_with_inline_context = make_query_request(
        signoz,
        token,
        start_ms=int(
            (
                datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
                - timedelta(minutes=5)
            ).timestamp()
            * 1000
        ),
        end_ms=int(
            datetime.now(tz=timezone.utc).replace(second=0, microsecond=0).timestamp()
            * 1000
        ),
        request_type="time_series",
        queries=[
            {
                "type": "builder_query",
                "spec": {
                    "name": "A",
                    "signal": "logs",
                    "stepInterval": 60,
                    "disabled": False,
                    "groupBy": [
                        {
                            "name": "resource.host.name:string",
                        }
                    ],
                    "order": [{"key": {"name": "host.name"}, "direction": "desc"}],
                    "having": {"expression": ""},
                    "aggregations": [{"expression": "count()"}],
                },
            }
        ],
    )

    assert_identical_query_response(response, response_with_inline_context)

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"
@@ -2,6 +2,7 @@ from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, Dict, List

import pytest
import requests

from fixtures import types
@@ -10,8 +11,10 @@ from fixtures.querier import (
    assert_minutely_bucket_values,
    find_named_result,
    index_series_by_label,
    make_query_request,
)
from fixtures.traces import TraceIdGenerator, Traces, TracesKind, TracesStatusCode
from src.querier.util import assert_identical_query_response


def test_traces_list(
@@ -215,6 +218,59 @@ def test_traces_list(
        },
    )

    # Query results with context appended to key names
    response_with_inline_context = make_query_request(
        signoz,
        token,
        start_ms=int(
            (datetime.now(tz=timezone.utc) - timedelta(minutes=5)).timestamp() * 1000
        ),
        end_ms=int(datetime.now(tz=timezone.utc).timestamp() * 1000),
        request_type="raw",
        queries=[
            {
                "type": "builder_query",
                "spec": {
                    "name": "A",
                    "signal": "traces",
                    "disabled": False,
                    "limit": 10,
                    "offset": 0,
                    "order": [
                        {"key": {"name": "timestamp"}, "direction": "desc"},
                    ],
                    "selectFields": [
                        {
                            "name": "resource.service.name",
                            "fieldDataType": "string",
                            "signal": "traces",
                        },
                        {
                            "name": "span.name:string",
                            "signal": "traces",
                        },
                        {
                            "name": "span.duration_nano",
                            "signal": "traces",
                        },
                        {
                            "name": "span.http_method",
                            "signal": "traces",
                        },
                        {
                            "name": "span.response_status_code",
                            "signal": "traces",
                        },
                    ],
                    "having": {"expression": ""},
                    "aggregations": [{"expression": "count()"}],
                },
            }
        ],
    )
    assert_identical_query_response(response, response_with_inline_context)

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"
@@ -420,6 +476,236 @@
    assert set(values) == set(["topic-service", "http-service"])


@pytest.mark.parametrize(
    "order_by,aggregation_alias,expected_status",
    [
        # Case 1a: count by count()
        pytest.param({"name": "count()"}, "count_", HTTPStatus.OK),
        # Case 1b: count by count() with alias span.count_
        pytest.param({"name": "count()"}, "span.count_", HTTPStatus.OK),
        # Case 2a: count by count() with context specified in the key
        pytest.param(
            {"name": "count()", "fieldContext": "span"}, "count_", HTTPStatus.OK
        ),
        # Case 2b: count by count() with context specified in the key with alias span.count_
        pytest.param(
            {"name": "count()", "fieldContext": "span"}, "span.count_", HTTPStatus.OK
        ),
        # Case 3a: count by span.count() with context specified in the key [BAD REQUEST]
        pytest.param(
            {"name": "span.count()", "fieldContext": "span"},
            "count_",
            HTTPStatus.BAD_REQUEST,
        ),
        # Case 3b: count by span.count() with context specified in the key with alias span.count_ [BAD REQUEST]
        pytest.param(
            {"name": "span.count()", "fieldContext": "span"},
            "span.count_",
            HTTPStatus.BAD_REQUEST,
        ),
        # Case 4a: count by span.count() with empty context in the key
        pytest.param(
            {"name": "span.count()", "fieldContext": ""}, "count_", HTTPStatus.OK
        ),
        # Case 4b: count by span.count() with empty context in the key with alias span.count_
        pytest.param(
            {"name": "span.count()", "fieldContext": ""}, "span.count_", HTTPStatus.OK
        ),
        # Case 5a: count by count_
        pytest.param({"name": "count_"}, "count_", HTTPStatus.OK),
        # Case 5b: count by count_ with alias span.count_
        pytest.param({"name": "count_"}, "count_", HTTPStatus.OK),
        # Case 6a: count by span.count_
        pytest.param({"name": "span.count_"}, "count_", HTTPStatus.OK),
        # Case 6b: count by span.count_ with alias span.count_
        pytest.param(
            {"name": "span.count_"}, "span.count_", HTTPStatus.BAD_REQUEST
        ),  # THIS SHOULD BE OK BUT FAILS DUE TO LIMITATION IN CURRENT IMPLEMENTATION
        # Case 7a: count by span.count_ with context specified in the key [BAD REQUEST]
        pytest.param(
            {"name": "span.count_", "fieldContext": "span"},
            "count_",
            HTTPStatus.BAD_REQUEST,
        ),
        # Case 7b: count by span.count_ with context specified in the key with alias span.count_
        pytest.param(
            {"name": "span.count_", "fieldContext": "span"},
            "span.count_",
            HTTPStatus.OK,
        ),
    ],
)
def test_traces_aggergate_order_by_count(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
    order_by: Dict[str, str],
    aggregation_alias: str,
    expected_status: HTTPStatus,
) -> None:
    """
    Setup:
        Insert 4 traces with different attributes.
        http-service: POST /integration -> SELECT, HTTP PATCH
        topic-service: topic publish

    Tests:
        1. Query traces count ordered by the count() aggregation, with the
           order-by key and aggregation alias written in different context styles
    """
    http_service_trace_id = TraceIdGenerator.trace_id()
    http_service_span_id = TraceIdGenerator.span_id()
    http_service_db_span_id = TraceIdGenerator.span_id()
    http_service_patch_span_id = TraceIdGenerator.span_id()
    topic_service_trace_id = TraceIdGenerator.trace_id()
    topic_service_span_id = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=4),
                duration=timedelta(seconds=3),
                trace_id=http_service_trace_id,
                span_id=http_service_span_id,
                parent_span_id="",
                name="POST /integration",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "http-service",
                    "os.type": "linux",
                    "host.name": "linux-000",
                    "cloud.provider": "integration",
                    "cloud.account.id": "000",
                },
                attributes={
                    "net.transport": "IP.TCP",
                    "http.scheme": "http",
                    "http.user_agent": "Integration Test",
                    "http.request.method": "POST",
                    "http.response.status_code": "200",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=3.5),
                duration=timedelta(seconds=0.5),
                trace_id=http_service_trace_id,
                span_id=http_service_db_span_id,
                parent_span_id=http_service_span_id,
                name="SELECT",
                kind=TracesKind.SPAN_KIND_CLIENT,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "http-service",
                    "os.type": "linux",
                    "host.name": "linux-000",
                    "cloud.provider": "integration",
                    "cloud.account.id": "000",
                },
                attributes={
                    "db.name": "integration",
                    "db.operation": "SELECT",
                    "db.statement": "SELECT * FROM integration",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=3),
                duration=timedelta(seconds=1),
                trace_id=http_service_trace_id,
                span_id=http_service_patch_span_id,
                parent_span_id=http_service_span_id,
                name="HTTP PATCH",
                kind=TracesKind.SPAN_KIND_CLIENT,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "http-service",
                    "os.type": "linux",
                    "host.name": "linux-000",
                    "cloud.provider": "integration",
                    "cloud.account.id": "000",
                },
                attributes={
                    "http.request.method": "PATCH",
                    "http.status_code": "404",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=1),
                duration=timedelta(seconds=4),
                trace_id=topic_service_trace_id,
                span_id=topic_service_span_id,
                parent_span_id="",
                name="topic publish",
                kind=TracesKind.SPAN_KIND_PRODUCER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "topic-service",
                    "os.type": "linux",
                    "host.name": "linux-001",
                    "cloud.provider": "integration",
                    "cloud.account.id": "001",
                },
                attributes={
                    "message.type": "SENT",
                    "messaging.operation": "publish",
                    "messaging.message.id": "001",
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    query = {
        "type": "builder_query",
        "spec": {
            "name": "A",
            "signal": "traces",
            "disabled": False,
            "order": [{"key": {"name": "count()"}, "direction": "desc"}],
            "aggregations": [{"expression": "count()", "alias": "count_"}],
        },
    }

    # Query traces count for spans
    query["spec"]["order"][0]["key"] = order_by
    query["spec"]["aggregations"][0]["alias"] = aggregation_alias
    response = make_query_request(
        signoz,
        token,
        start_ms=int(
            (datetime.now(tz=timezone.utc) - timedelta(minutes=5)).timestamp() * 1000
        ),
        end_ms=int(datetime.now(tz=timezone.utc).timestamp() * 1000),
        request_type="time_series",
        queries=[query],
    )

    assert response.status_code == expected_status
    if expected_status != HTTPStatus.OK:
        return
    assert response.json()["status"] == "success"

    results = response.json()["data"]["data"]["results"]
    assert len(results) == 1
    aggregations = results[0]["aggregations"]
    assert len(aggregations) == 1
    series = aggregations[0]["series"]
    assert len(series) == 1
    assert series[0]["values"][0]["value"] == 4


def test_traces_fill_gaps(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
tests/integration/src/querier/util.py (new file, 20 lines)
@@ -0,0 +1,20 @@
from http import HTTPStatus

import requests


def assert_identical_query_response(
    response1: requests.Response, response2: requests.Response
) -> None:
    """
    Assert that two query responses are identical in status and data.
    """
    assert response1.status_code == response2.status_code, "Status codes do not match"
    if response1.status_code == HTTPStatus.OK:
        assert (
            response1.json()["status"] == response2.json()["status"]
        ), "Response statuses do not match"
        assert (
            response1.json()["data"]["data"]["results"]
            == response2.json()["data"]["data"]["results"]
        ), "Response data do not match"