mirror of
https://github.com/SigNoz/signoz.git
synced 2026-04-25 05:10:27 +01:00
Compare commits
17 Commits
ns/color-b
...
issue_4203
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ed812ad1c8 | ||
|
|
3b82c2ce43 | ||
|
|
214980ddad | ||
|
|
a7b69a2678 | ||
|
|
73c82f50a9 | ||
|
|
2593c5eb91 | ||
|
|
b6b2d36baa | ||
|
|
a444a039f9 | ||
|
|
bfb050ec17 | ||
|
|
ff3e87f70c | ||
|
|
9ac02ebe00 | ||
|
|
fbdd0bebbc | ||
|
|
b2245b48fe | ||
|
|
87e654fc73 | ||
|
|
0ee31ce440 | ||
|
|
63e681b87b | ||
|
|
28375c8c1e |
@@ -11,8 +11,6 @@ global:
|
||||
external_url: <unset>
|
||||
# the url where the SigNoz backend receives telemetry data (traces, metrics, logs) from instrumented applications.
|
||||
ingestion_url: <unset>
|
||||
# the url of the SigNoz MCP server. when unset, the MCP settings page is hidden in the frontend.
|
||||
# mcp_url: <unset>
|
||||
|
||||
##################### Version #####################
|
||||
version:
|
||||
|
||||
@@ -2369,13 +2369,6 @@ components:
|
||||
$ref: '#/components/schemas/GlobaltypesIdentNConfig'
|
||||
ingestion_url:
|
||||
type: string
|
||||
mcp_url:
|
||||
nullable: true
|
||||
type: string
|
||||
required:
|
||||
- external_url
|
||||
- ingestion_url
|
||||
- mcp_url
|
||||
type: object
|
||||
GlobaltypesIdentNConfig:
|
||||
properties:
|
||||
@@ -4603,11 +4596,6 @@ components:
|
||||
type: object
|
||||
TracedetailtypesGettableWaterfallTrace:
|
||||
properties:
|
||||
aggregations:
|
||||
items:
|
||||
$ref: '#/components/schemas/TracedetailtypesSpanAggregationResult'
|
||||
nullable: true
|
||||
type: array
|
||||
endTimestampMillis:
|
||||
minimum: 0
|
||||
type: integer
|
||||
@@ -4647,11 +4635,6 @@ components:
|
||||
type: object
|
||||
TracedetailtypesPostableWaterfall:
|
||||
properties:
|
||||
aggregations:
|
||||
items:
|
||||
$ref: '#/components/schemas/TracedetailtypesSpanAggregation'
|
||||
nullable: true
|
||||
type: array
|
||||
limit:
|
||||
minimum: 0
|
||||
type: integer
|
||||
@@ -4663,32 +4646,6 @@ components:
|
||||
nullable: true
|
||||
type: array
|
||||
type: object
|
||||
TracedetailtypesSpanAggregation:
|
||||
properties:
|
||||
aggregation:
|
||||
$ref: '#/components/schemas/TracedetailtypesSpanAggregationType'
|
||||
field:
|
||||
$ref: '#/components/schemas/TelemetrytypesTelemetryFieldKey'
|
||||
type: object
|
||||
TracedetailtypesSpanAggregationResult:
|
||||
properties:
|
||||
aggregation:
|
||||
$ref: '#/components/schemas/TracedetailtypesSpanAggregationType'
|
||||
field:
|
||||
$ref: '#/components/schemas/TelemetrytypesTelemetryFieldKey'
|
||||
value:
|
||||
additionalProperties:
|
||||
minimum: 0
|
||||
type: integer
|
||||
nullable: true
|
||||
type: object
|
||||
type: object
|
||||
TracedetailtypesSpanAggregationType:
|
||||
enum:
|
||||
- spanCount
|
||||
- executionTimePercentage
|
||||
- duration
|
||||
type: string
|
||||
TracedetailtypesWaterfallSpan:
|
||||
properties:
|
||||
attributes:
|
||||
|
||||
@@ -3125,17 +3125,12 @@ export interface GlobaltypesConfigDTO {
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
external_url: string;
|
||||
external_url?: string;
|
||||
identN?: GlobaltypesIdentNConfigDTO;
|
||||
/**
|
||||
* @type string
|
||||
*/
|
||||
ingestion_url: string;
|
||||
/**
|
||||
* @type string
|
||||
* @nullable true
|
||||
*/
|
||||
mcp_url: string | null;
|
||||
ingestion_url?: string;
|
||||
}
|
||||
|
||||
export interface GlobaltypesIdentNConfigDTO {
|
||||
@@ -5593,11 +5588,6 @@ export type TracedetailtypesGettableWaterfallTraceDTOServiceNameToTotalDurationM
|
||||
{ [key: string]: number } | null;
|
||||
|
||||
export interface TracedetailtypesGettableWaterfallTraceDTO {
|
||||
/**
|
||||
* @type array
|
||||
* @nullable true
|
||||
*/
|
||||
aggregations?: TracedetailtypesSpanAggregationResultDTO[] | null;
|
||||
/**
|
||||
* @type integer
|
||||
* @minimum 0
|
||||
@@ -5652,11 +5642,6 @@ export interface TracedetailtypesGettableWaterfallTraceDTO {
|
||||
}
|
||||
|
||||
export interface TracedetailtypesPostableWaterfallDTO {
|
||||
/**
|
||||
* @type array
|
||||
* @nullable true
|
||||
*/
|
||||
aggregations?: TracedetailtypesSpanAggregationDTO[] | null;
|
||||
/**
|
||||
* @type integer
|
||||
* @minimum 0
|
||||
@@ -5673,33 +5658,6 @@ export interface TracedetailtypesPostableWaterfallDTO {
|
||||
uncollapsedSpans?: string[] | null;
|
||||
}
|
||||
|
||||
export interface TracedetailtypesSpanAggregationDTO {
|
||||
aggregation?: TracedetailtypesSpanAggregationTypeDTO;
|
||||
field?: TelemetrytypesTelemetryFieldKeyDTO;
|
||||
}
|
||||
|
||||
/**
|
||||
* @nullable
|
||||
*/
|
||||
export type TracedetailtypesSpanAggregationResultDTOValue = {
|
||||
[key: string]: number;
|
||||
} | null;
|
||||
|
||||
export interface TracedetailtypesSpanAggregationResultDTO {
|
||||
aggregation?: TracedetailtypesSpanAggregationTypeDTO;
|
||||
field?: TelemetrytypesTelemetryFieldKeyDTO;
|
||||
/**
|
||||
* @type object
|
||||
* @nullable true
|
||||
*/
|
||||
value?: TracedetailtypesSpanAggregationResultDTOValue;
|
||||
}
|
||||
|
||||
export enum TracedetailtypesSpanAggregationTypeDTO {
|
||||
spanCount = 'spanCount',
|
||||
executionTimePercentage = 'executionTimePercentage',
|
||||
duration = 'duration',
|
||||
}
|
||||
/**
|
||||
* @nullable
|
||||
*/
|
||||
|
||||
@@ -17,7 +17,6 @@ var (
|
||||
type Config struct {
|
||||
ExternalURL *url.URL `mapstructure:"external_url"`
|
||||
IngestionURL *url.URL `mapstructure:"ingestion_url"`
|
||||
MCPURL *url.URL `mapstructure:"mcp_url"`
|
||||
}
|
||||
|
||||
func NewConfigFactory() factory.ConfigFactory {
|
||||
|
||||
@@ -31,14 +31,8 @@ func newProvider(_ context.Context, providerSettings factory.ProviderSettings, c
|
||||
}
|
||||
|
||||
func (provider *provider) GetConfig(context.Context) *globaltypes.Config {
|
||||
var mcpURL *string
|
||||
if provider.config.MCPURL != nil {
|
||||
s := provider.config.MCPURL.String()
|
||||
mcpURL = &s
|
||||
}
|
||||
|
||||
return globaltypes.NewConfig(
|
||||
globaltypes.NewEndpoint(provider.config.ExternalURL.String(), provider.config.IngestionURL.String(), mcpURL),
|
||||
globaltypes.NewEndpoint(provider.config.ExternalURL.String(), provider.config.IngestionURL.String()),
|
||||
globaltypes.NewIdentNConfig(
|
||||
globaltypes.TokenizerConfig{Enabled: provider.identNConfig.Tokenizer.Enabled},
|
||||
globaltypes.APIKeyConfig{Enabled: provider.identNConfig.APIKeyConfig.Enabled},
|
||||
|
||||
@@ -25,11 +25,6 @@ func (h *handler) GetWaterfall(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := req.Validate(); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
result, err := h.module.GetWaterfall(r.Context(), mux.Vars(r)["traceID"], req)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
|
||||
@@ -37,12 +37,7 @@ func (m *module) GetWaterfall(ctx context.Context, traceID string, req *tracedet
|
||||
m.config.Waterfall.MaxDepthToAutoExpand,
|
||||
)
|
||||
|
||||
aggregationResults := make([]tracedetailtypes.SpanAggregationResult, 0, len(req.Aggregations))
|
||||
for _, a := range req.Aggregations {
|
||||
aggregationResults = append(aggregationResults, waterfallTrace.GetSpanAggregation(a.Aggregation, a.Field))
|
||||
}
|
||||
|
||||
return tracedetailtypes.NewGettableWaterfallTrace(waterfallTrace, selectedSpans, uncollapsedSpans, selectedAllSpans, aggregationResults), nil
|
||||
return tracedetailtypes.NewGettableWaterfallTrace(waterfallTrace, selectedSpans, uncollapsedSpans, selectedAllSpans), nil
|
||||
}
|
||||
|
||||
// getTraceData returns the waterfall cache for the given traceID with fallback on DB.
|
||||
|
||||
@@ -260,7 +260,7 @@ func TestGetSelectedSpans_MultipleRoots(t *testing.T) {
|
||||
trace := getWaterfallTrace([]*tracedetailtypes.WaterfallSpan{root1, root2}, spanMap)
|
||||
spans, _ := trace.GetSelectedSpans([]string{"root1", "root2"}, "root1", 500, 5)
|
||||
|
||||
traceRespnose := tracedetailtypes.NewGettableWaterfallTrace(trace, spans, nil, false, nil)
|
||||
traceRespnose := tracedetailtypes.NewGettableWaterfallTrace(trace, spans, nil, false)
|
||||
|
||||
assert.Equal(t, []string{"root1", "child1", "root2", "child2"}, spanIDs(spans), "root1 subtree must precede root2 subtree")
|
||||
assert.Equal(t, "svc-a", traceRespnose.RootServiceName, "metadata comes from first root")
|
||||
@@ -567,7 +567,7 @@ func TestGetAllSpans(t *testing.T) {
|
||||
)
|
||||
trace := getWaterfallTrace([]*tracedetailtypes.WaterfallSpan{root}, nil)
|
||||
spans := trace.GetAllSpans()
|
||||
traceResponse := tracedetailtypes.NewGettableWaterfallTrace(trace, spans, nil, true, nil)
|
||||
traceResponse := tracedetailtypes.NewGettableWaterfallTrace(trace, spans, nil, true)
|
||||
assert.ElementsMatch(t, spanIDs(spans), []string{"root", "childA", "grandchildA", "leafA", "childB", "grandchildB", "leafB"})
|
||||
assert.Equal(t, "svc", traceResponse.RootServiceName)
|
||||
assert.Equal(t, "root-op", traceResponse.RootServiceEntryPoint)
|
||||
|
||||
@@ -265,6 +265,15 @@ func (q *builderQuery[T]) executeWithContext(ctx context.Context, query string,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: This should move to readAsRaw function in consume.go but for now we are keeping it here since it's only relevant for traces
|
||||
if q.spec.Signal == telemetrytypes.SignalTraces {
|
||||
if raw, ok := payload.(*qbtypes.RawData); ok {
|
||||
for _, rr := range raw.Rows {
|
||||
mergeSpanAttributeColumns(rr.Data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &qbtypes.Result{
|
||||
Type: q.kind,
|
||||
Value: payload,
|
||||
|
||||
@@ -431,6 +431,48 @@ func readAsRaw(rows driver.Rows, queryName string) (*qbtypes.RawData, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
// mergeSpanAttributeColumns merges the typed ClickHouse span attribute columns
|
||||
// (attributes_string, attributes_number, attributes_bool, resources_string) into
|
||||
// unified "attributes" and "resource_attributes" keys, removing the raw columns.
|
||||
// It is a no-op if none of the raw columns are present.
|
||||
func mergeSpanAttributeColumns(data map[string]any) {
|
||||
attrStr, hasStr := data["attributes_string"]
|
||||
attrNum, hasNum := data["attributes_number"]
|
||||
attrBool, hasBool := data["attributes_bool"]
|
||||
// todo(nitya): move to resource json
|
||||
resStr, hasRes := data["resources_string"]
|
||||
|
||||
if !hasStr && !hasNum && !hasBool && !hasRes {
|
||||
return
|
||||
}
|
||||
|
||||
attributes := make(map[string]any)
|
||||
if m, ok := attrStr.(map[string]string); ok {
|
||||
for k, v := range m {
|
||||
attributes[k] = v
|
||||
}
|
||||
}
|
||||
if m, ok := attrNum.(map[string]float64); ok {
|
||||
for k, v := range m {
|
||||
attributes[k] = v
|
||||
}
|
||||
}
|
||||
if m, ok := attrBool.(map[string]bool); ok {
|
||||
for k, v := range m {
|
||||
attributes[k] = v
|
||||
}
|
||||
}
|
||||
delete(data, "attributes_string")
|
||||
delete(data, "attributes_number")
|
||||
delete(data, "attributes_bool")
|
||||
data["attributes"] = attributes
|
||||
|
||||
if m, ok := resStr.(map[string]string); ok {
|
||||
data["resource"] = m
|
||||
}
|
||||
delete(data, "resources_string")
|
||||
}
|
||||
|
||||
// numericAsFloat converts numeric types to float64 efficiently.
|
||||
func numericAsFloat(v any) float64 {
|
||||
switch x := v.(type) {
|
||||
|
||||
@@ -85,6 +85,13 @@ func (q *traceOperatorQuery) executeWithContext(ctx context.Context, query strin
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: This should move to readAsRaw function in consume.go but for now we can keep it here since it's only relevant for traces
|
||||
if raw, ok := payload.(*qbtypes.RawData); ok {
|
||||
for _, rr := range raw.Rows {
|
||||
mergeSpanAttributeColumns(rr.Data)
|
||||
}
|
||||
}
|
||||
|
||||
return &qbtypes.Result{
|
||||
Type: q.kind,
|
||||
Value: payload,
|
||||
|
||||
@@ -1154,13 +1154,7 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, orgID
|
||||
if err != nil {
|
||||
r.logger.Info("cache miss for getFlamegraphSpansForTrace", "traceID", traceID)
|
||||
|
||||
selectCols := "timestamp, duration_nano, span_id, trace_id, has_error, links as references, resource_string_service$$name, name, events"
|
||||
if len(req.RequiredFields) > 0 {
|
||||
selectCols += ", attributes_string, attributes_number, attributes_bool, resources_string"
|
||||
}
|
||||
flamegraphQuery := fmt.Sprintf("SELECT %s FROM %s.%s WHERE trace_id=$1 and ts_bucket_start>=$2 and ts_bucket_start<=$3 ORDER BY timestamp ASC, name ASC", selectCols, r.TraceDB, r.traceTableName)
|
||||
|
||||
searchScanResponses, err := r.GetSpansForTrace(ctx, traceID, flamegraphQuery)
|
||||
searchScanResponses, err := r.GetSpansForTrace(ctx, traceID, fmt.Sprintf("SELECT timestamp, duration_nano, span_id, trace_id, has_error,links as references, resource_string_service$$name, name, events FROM %s.%s WHERE trace_id=$1 and ts_bucket_start>=$2 and ts_bucket_start<=$3 ORDER BY timestamp ASC, name ASC", r.TraceDB, r.traceTableName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1199,10 +1193,6 @@ func (r *ClickHouseReader) GetFlamegraphSpansForTrace(ctx context.Context, orgID
|
||||
Children: make([]*model.FlamegraphSpan, 0),
|
||||
}
|
||||
|
||||
if len(req.RequiredFields) > 0 {
|
||||
jsonItem.SetRequestedFields(item, req.RequiredFields)
|
||||
}
|
||||
|
||||
// metadata calculation
|
||||
startTimeUnixNano := uint64(item.TimeUnixNano.UnixNano())
|
||||
if startTime == 0 || startTimeUnixNano < startTime {
|
||||
|
||||
@@ -2,8 +2,6 @@ package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
type InstantQueryMetricsParams struct {
|
||||
@@ -339,11 +337,10 @@ type GetWaterfallSpansForTraceWithMetadataParams struct {
|
||||
}
|
||||
|
||||
type GetFlamegraphSpansForTraceParams struct {
|
||||
SelectedSpanID string `json:"selectedSpanId"`
|
||||
Limit uint `json:"limit"`
|
||||
BoundaryStartTS uint64 `json:"boundaryStartTsMilli"`
|
||||
BoundaryEndTS uint64 `json:"boundarEndTsMilli"`
|
||||
RequiredFields []telemetrytypes.TelemetryFieldKey `json:"requiredFields"`
|
||||
SelectedSpanID string `json:"selectedSpanId"`
|
||||
Limit uint `json:"limit"`
|
||||
BoundaryStartTS uint64 `json:"boundaryStartTsMilli"`
|
||||
BoundaryEndTS uint64 `json:"boundarEndTsMilli"`
|
||||
}
|
||||
|
||||
type SpanFilterParams struct {
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/prometheus/promql/parser"
|
||||
"github.com/prometheus/prometheus/util/stats"
|
||||
@@ -315,30 +314,6 @@ type FlamegraphSpan struct {
|
||||
Events []Event `json:"event"`
|
||||
References []OtelSpanRef `json:"references,omitempty"`
|
||||
Children []*FlamegraphSpan `json:"children"`
|
||||
Attributes map[string]any `json:"attributes,omitempty"`
|
||||
Resource map[string]string `json:"resource,omitempty"`
|
||||
}
|
||||
|
||||
// SetRequestedFields extracts the requested attribute/resource fields from item into s.
|
||||
func (s *FlamegraphSpan) SetRequestedFields(item SpanItemV2, fields []telemetrytypes.TelemetryFieldKey) {
|
||||
for _, field := range fields {
|
||||
switch field.FieldContext {
|
||||
case telemetrytypes.FieldContextResource:
|
||||
if v, ok := item.Resources_string[field.Name]; ok && v != "" {
|
||||
if s.Resource == nil {
|
||||
s.Resource = make(map[string]string)
|
||||
}
|
||||
s.Resource[field.Name] = v
|
||||
}
|
||||
case telemetrytypes.FieldContextAttribute:
|
||||
if v := item.AttributeValue(field.Name); v != nil {
|
||||
if s.Attributes == nil {
|
||||
s.Attributes = make(map[string]any)
|
||||
}
|
||||
s.Attributes[field.Name] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type GetWaterfallSpansForTraceWithMetadataResponse struct {
|
||||
|
||||
@@ -29,17 +29,3 @@ type TraceSummary struct {
|
||||
End time.Time `ch:"end"`
|
||||
NumSpans uint64 `ch:"num_spans"`
|
||||
}
|
||||
|
||||
// AttributeValue looks up an attribute across string, number, and bool maps in priority order.
|
||||
func (s SpanItemV2) AttributeValue(name string) any {
|
||||
if v, ok := s.Attributes_string[name]; ok && v != "" {
|
||||
return v
|
||||
}
|
||||
if v, ok := s.Attributes_number[name]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok := s.Attributes_bool[name]; ok {
|
||||
return v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,50 @@
|
||||
package telemetrytraces
|
||||
|
||||
import "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
import (
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
// Internal Columns.
|
||||
SpanTimestampBucketStartColumn = "ts_bucket_start"
|
||||
SpanResourceFingerPrintColumn = "resource_fingerprint"
|
||||
|
||||
// Intrinsic Columns.
|
||||
SpanTimestampColumn = "timestamp"
|
||||
SpanTraceIDColumn = "trace_id"
|
||||
SpanSpanIDColumn = "span_id"
|
||||
SpanTraceStateColumn = "trace_state"
|
||||
SpanParentSpanIDColumn = "parent_span_id"
|
||||
SpanFlagsColumn = "flags"
|
||||
SpanNameColumn = "name"
|
||||
SpanKindColumn = "kind"
|
||||
SpanKindStringColumn = "kind_string"
|
||||
SpanDurationNanoColumn = "duration_nano"
|
||||
SpanStatusCodeColumn = "status_code"
|
||||
SpanStatusMessageColumn = "status_message"
|
||||
SpanStatusCodeStringColumn = "status_code_string"
|
||||
SpanEventsColumn = "events"
|
||||
SpanLinksColumn = "links"
|
||||
|
||||
// Calculated Columns.
|
||||
SpanResponseStatusCodeColumn = "response_status_code"
|
||||
SpanExternalHTTPURLColumn = "external_http_url"
|
||||
SpanHTTPURLColumn = "http_url"
|
||||
SpanExternalHTTPMethodColumn = "external_http_method"
|
||||
SpanHTTPMethodColumn = "http_method"
|
||||
SpanHTTPHostColumn = "http_host"
|
||||
SpanDBNameColumn = "db_name"
|
||||
SpanDBOperationColumn = "db_operation"
|
||||
SpanHasErrorColumn = "has_error"
|
||||
SpanIsRemoteColumn = "is_remote"
|
||||
|
||||
// Contextual Columns.
|
||||
SpanAttributesStringColumn = "attributes_string"
|
||||
SpanAttributesNumberColumn = "attributes_number"
|
||||
SpanAttributesBoolColumn = "attributes_bool"
|
||||
SpanResourcesStringColumn = "resources_string"
|
||||
)
|
||||
|
||||
var (
|
||||
IntrinsicFields = map[string]telemetrytypes.TelemetryFieldKey{
|
||||
|
||||
@@ -78,6 +78,16 @@ func TestGetFieldKeyName(t *testing.T) {
|
||||
expectedResult: "multiIf(resource.`deployment.environment` IS NOT NULL, resource.`deployment.environment`::String, `resource_string_deployment$$environment_exists`==true, `resource_string_deployment$$environment`, NULL)",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Contextual map column - attributes_string without span context does not short-circuit",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
Name: SpanAttributesStringColumn,
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeString,
|
||||
},
|
||||
expectedResult: "attributes_string['attributes_string']",
|
||||
expectedError: nil,
|
||||
},
|
||||
{
|
||||
name: "Non-existent column",
|
||||
key: telemetrytypes.TelemetryFieldKey{
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
@@ -15,7 +14,6 @@ import (
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/huandu/go-sqlbuilder"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -86,40 +84,12 @@ func (b *traceQueryStatementBuilder) Build(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
/*
|
||||
Adding a tech debt note here:
|
||||
This piece of code is a hot fix and should be removed once we close issue: engineering-pod/issues/3622
|
||||
*/
|
||||
/*
|
||||
-------------------------------- Start of tech debt ----------------------------
|
||||
*/
|
||||
isSelectFieldsEmpty := false
|
||||
if requestType == qbtypes.RequestTypeRaw {
|
||||
|
||||
selectedFields := query.SelectFields
|
||||
|
||||
if len(selectedFields) == 0 {
|
||||
sortedKeys := maps.Keys(DefaultFields)
|
||||
slices.Sort(sortedKeys)
|
||||
for _, key := range sortedKeys {
|
||||
selectedFields = append(selectedFields, DefaultFields[key])
|
||||
}
|
||||
query.SelectFields = selectedFields
|
||||
}
|
||||
|
||||
selectFieldKeys := []string{}
|
||||
for _, field := range selectedFields {
|
||||
selectFieldKeys = append(selectFieldKeys, field.Name)
|
||||
}
|
||||
|
||||
for _, x := range []string{"timestamp", "span_id", "trace_id"} {
|
||||
if !slices.Contains(selectFieldKeys, x) {
|
||||
query.SelectFields = append(query.SelectFields, DefaultFields[x])
|
||||
}
|
||||
}
|
||||
// we are expanding here to ensure that all the conflicts are taken care in adjustKeys
|
||||
// i.e if there is a conflict we strip away context of the key in adjustKeys
|
||||
query, isSelectFieldsEmpty = b.expandRawSelectFields(query)
|
||||
}
|
||||
/*
|
||||
-------------------------------- End of tech debt ----------------------------
|
||||
*/
|
||||
|
||||
query = b.adjustKeys(ctx, keys, query, requestType)
|
||||
|
||||
@@ -128,7 +98,7 @@ func (b *traceQueryStatementBuilder) Build(
|
||||
|
||||
switch requestType {
|
||||
case qbtypes.RequestTypeRaw:
|
||||
return b.buildListQuery(ctx, q, query, start, end, keys, variables)
|
||||
return b.buildListQuery(ctx, q, query, start, end, keys, variables, isSelectFieldsEmpty)
|
||||
case qbtypes.RequestTypeTimeSeries:
|
||||
return b.buildTimeSeriesQuery(ctx, q, query, start, end, keys, variables)
|
||||
case qbtypes.RequestTypeScalar:
|
||||
@@ -292,6 +262,7 @@ func (b *traceQueryStatementBuilder) buildListQuery(
|
||||
start, end uint64,
|
||||
keys map[string][]*telemetrytypes.TelemetryFieldKey,
|
||||
variables map[string]qbtypes.VariableItem,
|
||||
isSelectFieldsEmpty bool,
|
||||
) (*qbtypes.Statement, error) {
|
||||
|
||||
var (
|
||||
@@ -306,7 +277,6 @@ func (b *traceQueryStatementBuilder) buildListQuery(
|
||||
cteArgs = append(cteArgs, args)
|
||||
}
|
||||
|
||||
// TODO: should we deprecate `SelectFields` and return everything from a span like we do for logs?
|
||||
for _, field := range query.SelectFields {
|
||||
colExpr, err := b.fm.ColumnExpressionFor(ctx, start, end, &field, keys)
|
||||
if err != nil {
|
||||
@@ -315,6 +285,13 @@ func (b *traceQueryStatementBuilder) buildListQuery(
|
||||
sb.SelectMore(colExpr)
|
||||
}
|
||||
|
||||
if isSelectFieldsEmpty {
|
||||
sb.SelectMore(SpanAttributesStringColumn)
|
||||
sb.SelectMore(SpanAttributesNumberColumn)
|
||||
sb.SelectMore(SpanAttributesBoolColumn)
|
||||
sb.SelectMore(SpanResourcesStringColumn)
|
||||
}
|
||||
|
||||
// From table
|
||||
sb.From(fmt.Sprintf("%s.%s", DBName, SpanIndexV3TableName))
|
||||
|
||||
@@ -841,3 +818,52 @@ func (b *traceQueryStatementBuilder) buildResourceFilterCTE(
|
||||
variables,
|
||||
)
|
||||
}
|
||||
|
||||
// expandRawSelectFields populates SelectFields for raw (list view) queries.
|
||||
// It must be called before adjustKeys so that normalization runs over the full set.
|
||||
// Returns the updated query and whether the original SelectFields was empty (i.e. full expansion was performed).
|
||||
func (b *traceQueryStatementBuilder) expandRawSelectFields(query qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]) (qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation], bool) {
|
||||
wasEmpty := len(query.SelectFields) == 0
|
||||
selectFields := []telemetrytypes.TelemetryFieldKey{
|
||||
{Name: SpanTimestampColumn, FieldContext: telemetrytypes.FieldContextSpan},
|
||||
{Name: SpanTraceIDColumn, FieldContext: telemetrytypes.FieldContextSpan},
|
||||
{Name: SpanSpanIDColumn, FieldContext: telemetrytypes.FieldContextSpan},
|
||||
}
|
||||
if wasEmpty {
|
||||
// Select all intrinsic columns
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanTraceStateColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanParentSpanIDColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanFlagsColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanNameColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanKindColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanKindStringColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanDurationNanoColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanStatusCodeColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanStatusMessageColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanStatusCodeStringColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanEventsColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanLinksColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
|
||||
// select all calculated columns
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanResponseStatusCodeColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanExternalHTTPURLColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanHTTPURLColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanExternalHTTPMethodColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanHTTPMethodColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanHTTPHostColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanDBNameColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanDBOperationColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanHasErrorColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
selectFields = append(selectFields, telemetrytypes.TelemetryFieldKey{Name: SpanIsRemoteColumn, FieldContext: telemetrytypes.FieldContextSpan})
|
||||
} else {
|
||||
for _, field := range query.SelectFields {
|
||||
// TODO(tvats): If a user specifies attribute.timestamp in the select fields, this loop will basically ignore it, as we already added a field by default. This can be fixed once we close https://github.com/SigNoz/engineering-pod/issues/3693
|
||||
if field.Name == SpanTimestampColumn || field.Name == SpanTraceIDColumn || field.Name == SpanSpanIDColumn {
|
||||
continue
|
||||
}
|
||||
selectFields = append(selectFields, field)
|
||||
}
|
||||
}
|
||||
query.SelectFields = selectFields
|
||||
return query, wasEmpty
|
||||
}
|
||||
|
||||
@@ -436,7 +436,7 @@ func TestStatementBuilderListQuery(t *testing.T) {
|
||||
},
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT name AS `name`, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name`, duration_nano AS `duration_nano`, `attribute_number_cart$$items_count` AS `cart.items_count`, timestamp AS `timestamp`, span_id AS `span_id`, trace_id AS `trace_id` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
|
||||
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp AS `timestamp`, trace_id AS `trace_id`, span_id AS `span_id`, name AS `name`, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name`, duration_nano AS `duration_nano`, `attribute_number_cart$$items_count` AS `cart.items_count` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
|
||||
Args: []any{"redis-manual", "%service.name%", "%service.name\":\"redis-manual%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
|
||||
},
|
||||
expectedErr: nil,
|
||||
@@ -465,7 +465,7 @@ func TestStatementBuilderListQuery(t *testing.T) {
|
||||
Limit: 10,
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT duration_nano AS `duration_nano`, name AS `name`, response_status_code AS `response_status_code`, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name`, span_id AS `span_id`, timestamp AS `timestamp`, trace_id AS `trace_id` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? ORDER BY attributes_string['user.id'] AS `user.id` desc LIMIT ?",
|
||||
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp AS `timestamp`, trace_id AS `trace_id`, span_id AS `span_id`, trace_state AS `trace_state`, parent_span_id AS `parent_span_id`, flags AS `flags`, name AS `name`, kind AS `kind`, kind_string AS `kind_string`, duration_nano AS `duration_nano`, status_code AS `status_code`, status_message AS `status_message`, status_code_string AS `status_code_string`, events AS `events`, links AS `links`, response_status_code AS `response_status_code`, external_http_url AS `external_http_url`, http_url AS `http_url`, external_http_method AS `external_http_method`, http_method AS `http_method`, http_host AS `http_host`, db_name AS `db_name`, db_operation AS `db_operation`, has_error AS `has_error`, is_remote AS `is_remote`, attributes_string, attributes_number, attributes_bool, resources_string FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? ORDER BY attributes_string['user.id'] AS `user.id` desc LIMIT ?",
|
||||
Args: []any{"redis-manual", "%service.name%", "%service.name\":\"redis-manual%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
|
||||
},
|
||||
expectedErr: nil,
|
||||
@@ -509,7 +509,7 @@ func TestStatementBuilderListQuery(t *testing.T) {
|
||||
Limit: 10,
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT name AS `name`, resource_string_service$$name AS `serviceName`, duration_nano AS `durationNano`, http_method AS `httpMethod`, response_status_code AS `responseStatusCode`, timestamp AS `timestamp`, span_id AS `span_id`, trace_id AS `trace_id` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
|
||||
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp AS `timestamp`, trace_id AS `trace_id`, span_id AS `span_id`, name AS `name`, resource_string_service$$name AS `serviceName`, duration_nano AS `durationNano`, http_method AS `httpMethod`, response_status_code AS `responseStatusCode` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
|
||||
Args: []any{"redis-manual", "%service.name%", "%service.name\":\"redis-manual%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
|
||||
},
|
||||
expectedErr: nil,
|
||||
@@ -553,7 +553,7 @@ func TestStatementBuilderListQuery(t *testing.T) {
|
||||
Limit: 10,
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT name AS `name`, resource_string_service$$name AS `serviceName`, duration_nano AS `durationNano`, http_method AS `httpMethod`, multiIf(toString(`attribute_string_mixed$$materialization$$key`) != '', toString(`attribute_string_mixed$$materialization$$key`), toString(multiIf(resource.`mixed.materialization.key` IS NOT NULL, resource.`mixed.materialization.key`::String, mapContains(resources_string, 'mixed.materialization.key'), resources_string['mixed.materialization.key'], NULL)) != '', toString(multiIf(resource.`mixed.materialization.key` IS NOT NULL, resource.`mixed.materialization.key`::String, mapContains(resources_string, 'mixed.materialization.key'), resources_string['mixed.materialization.key'], NULL)), NULL) AS `mixed.materialization.key`, timestamp AS `timestamp`, span_id AS `span_id`, trace_id AS `trace_id` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
|
||||
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp AS `timestamp`, trace_id AS `trace_id`, span_id AS `span_id`, name AS `name`, resource_string_service$$name AS `serviceName`, duration_nano AS `durationNano`, http_method AS `httpMethod`, multiIf(toString(`attribute_string_mixed$$materialization$$key`) != '', toString(`attribute_string_mixed$$materialization$$key`), toString(multiIf(resource.`mixed.materialization.key` IS NOT NULL, resource.`mixed.materialization.key`::String, mapContains(resources_string, 'mixed.materialization.key'), resources_string['mixed.materialization.key'], NULL)) != '', toString(multiIf(resource.`mixed.materialization.key` IS NOT NULL, resource.`mixed.materialization.key`::String, mapContains(resources_string, 'mixed.materialization.key'), resources_string['mixed.materialization.key'], NULL)), NULL) AS `mixed.materialization.key` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
|
||||
Args: []any{"redis-manual", "%service.name%", "%service.name\":\"redis-manual%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
|
||||
},
|
||||
expectedErr: nil,
|
||||
@@ -598,7 +598,7 @@ func TestStatementBuilderListQuery(t *testing.T) {
|
||||
Limit: 10,
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT name AS `name`, resource_string_service$$name AS `serviceName`, duration_nano AS `durationNano`, http_method AS `httpMethod`, `attribute_string_mixed$$materialization$$key` AS `mixed.materialization.key`, timestamp AS `timestamp`, span_id AS `span_id`, trace_id AS `trace_id` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
|
||||
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT timestamp AS `timestamp`, trace_id AS `trace_id`, span_id AS `span_id`, name AS `name`, resource_string_service$$name AS `serviceName`, duration_nano AS `durationNano`, http_method AS `httpMethod`, `attribute_string_mixed$$materialization$$key` AS `mixed.materialization.key` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
|
||||
Args: []any{"redis-manual", "%service.name%", "%service.name\":\"redis-manual%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
|
||||
},
|
||||
expectedErr: nil,
|
||||
@@ -706,7 +706,7 @@ func TestStatementBuilderListQueryWithCorruptData(t *testing.T) {
|
||||
Limit: 10,
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "SELECT duration_nano AS `duration_nano`, name AS `name`, response_status_code AS `response_status_code`, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name`, span_id AS `span_id`, timestamp AS `timestamp`, trace_id AS `trace_id` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
|
||||
Query: "SELECT timestamp AS `timestamp`, trace_id AS `trace_id`, span_id AS `span_id`, trace_state AS `trace_state`, parent_span_id AS `parent_span_id`, flags AS `flags`, name AS `name`, kind AS `kind`, kind_string AS `kind_string`, duration_nano AS `duration_nano`, status_code AS `status_code`, status_message AS `status_message`, status_code_string AS `status_code_string`, events AS `events`, links AS `links`, response_status_code AS `response_status_code`, external_http_url AS `external_http_url`, http_url AS `http_url`, external_http_method AS `external_http_method`, http_method AS `http_method`, http_host AS `http_host`, db_name AS `db_name`, db_operation AS `db_operation`, has_error AS `has_error`, is_remote AS `is_remote`, attributes_string, attributes_number, attributes_bool, resources_string FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
|
||||
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
|
||||
},
|
||||
expectedErr: nil,
|
||||
@@ -739,7 +739,7 @@ func TestStatementBuilderListQueryWithCorruptData(t *testing.T) {
|
||||
}},
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "SELECT duration_nano AS `duration_nano`, name AS `name`, response_status_code AS `response_status_code`, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name`, span_id AS `span_id`, timestamp AS `timestamp`, trace_id AS `trace_id` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? ORDER BY timestamp AS `timestamp` asc LIMIT ?",
|
||||
Query: "SELECT timestamp AS `timestamp`, trace_id AS `trace_id`, span_id AS `span_id`, trace_state AS `trace_state`, parent_span_id AS `parent_span_id`, flags AS `flags`, name AS `name`, kind AS `kind`, kind_string AS `kind_string`, duration_nano AS `duration_nano`, status_code AS `status_code`, status_message AS `status_message`, status_code_string AS `status_code_string`, events AS `events`, links AS `links`, response_status_code AS `response_status_code`, external_http_url AS `external_http_url`, http_url AS `http_url`, external_http_method AS `external_http_method`, http_method AS `http_method`, http_host AS `http_host`, db_name AS `db_name`, db_operation AS `db_operation`, has_error AS `has_error`, is_remote AS `is_remote`, attributes_string, attributes_number, attributes_bool, resources_string FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? ORDER BY timestamp AS `timestamp` asc LIMIT ?",
|
||||
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
|
||||
},
|
||||
expectedErr: nil,
|
||||
|
||||
@@ -1,15 +1,13 @@
|
||||
package globaltypes
|
||||
|
||||
type Endpoint struct {
|
||||
ExternalURL string `json:"external_url" required:"true"`
|
||||
IngestionURL string `json:"ingestion_url" required:"true"`
|
||||
MCPURL *string `json:"mcp_url" required:"true" nullable:"true"`
|
||||
ExternalURL string `json:"external_url"`
|
||||
IngestionURL string `json:"ingestion_url"`
|
||||
}
|
||||
|
||||
func NewEndpoint(externalURL, ingestionURL string, mcpURL *string) Endpoint {
|
||||
func NewEndpoint(externalURL, ingestionURL string) Endpoint {
|
||||
return Endpoint{
|
||||
ExternalURL: externalURL,
|
||||
IngestionURL: ingestionURL,
|
||||
MCPURL: mcpURL,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,50 +0,0 @@
|
||||
package tracedetailtypes
|
||||
|
||||
import (
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
const maxAggregationItems = 10
|
||||
|
||||
var ErrTooManyAggregationItems = errors.NewInvalidInputf(errors.CodeInvalidInput, "aggregations request exceeds maximum of %d items", maxAggregationItems)
|
||||
|
||||
// SpanAggregationType defines the aggregation to compute over spans grouped by a field.
|
||||
type SpanAggregationType string
|
||||
|
||||
const (
|
||||
SpanAggregationSpanCount SpanAggregationType = "spanCount"
|
||||
SpanAggregationExecutionTimePercentage SpanAggregationType = "executionTimePercentage"
|
||||
SpanAggregationDuration SpanAggregationType = "duration"
|
||||
)
|
||||
|
||||
// SpanAggregation is a single aggregation request item: which field to group by and how.
|
||||
type SpanAggregation struct {
|
||||
Field telemetrytypes.TelemetryFieldKey `json:"field"`
|
||||
Aggregation SpanAggregationType `json:"aggregation"`
|
||||
}
|
||||
|
||||
// SpanAggregationResult is the computed result for one aggregation request item.
|
||||
// Duration values are in milliseconds.
|
||||
type SpanAggregationResult struct {
|
||||
Field telemetrytypes.TelemetryFieldKey `json:"field"`
|
||||
Aggregation SpanAggregationType `json:"aggregation"`
|
||||
Value map[string]uint64 `json:"value" nullable:"true"`
|
||||
}
|
||||
|
||||
func (s SpanAggregationType) Enum() []any {
|
||||
return []any{
|
||||
SpanAggregationSpanCount,
|
||||
SpanAggregationExecutionTimePercentage,
|
||||
SpanAggregationDuration,
|
||||
}
|
||||
}
|
||||
|
||||
func (s SpanAggregationType) isValid() bool {
|
||||
for _, v := range s.Enum() {
|
||||
if v == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1,240 +0,0 @@
|
||||
package tracedetailtypes
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// mkASpan builds a WaterfallSpan with timing and field data for analytics tests.
|
||||
func mkASpan(id string, resource map[string]string, attributes map[string]any, startNs, durationNs uint64) *WaterfallSpan {
|
||||
return &WaterfallSpan{
|
||||
SpanID: id,
|
||||
Resource: resource,
|
||||
Attributes: attributes,
|
||||
TimeUnixNano: startNs,
|
||||
DurationNano: durationNs,
|
||||
Children: make([]*WaterfallSpan, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func buildTraceFromSpans(spans ...*WaterfallSpan) *WaterfallTrace {
|
||||
spanMap := make(map[string]*WaterfallSpan, len(spans))
|
||||
var startTime, endTime uint64
|
||||
initialized := false
|
||||
for _, s := range spans {
|
||||
spanMap[s.SpanID] = s
|
||||
if !initialized || s.TimeUnixNano < startTime {
|
||||
startTime = s.TimeUnixNano
|
||||
initialized = true
|
||||
}
|
||||
if end := s.TimeUnixNano + s.DurationNano; end > endTime {
|
||||
endTime = end
|
||||
}
|
||||
}
|
||||
return NewWaterfallTrace(startTime, endTime, uint64(len(spanMap)), 0, spanMap, nil, nil, false)
|
||||
}
|
||||
|
||||
var (
|
||||
fieldServiceName = telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
FieldContext: telemetrytypes.FieldContextResource,
|
||||
}
|
||||
fieldHTTPMethod = telemetrytypes.TelemetryFieldKey{
|
||||
Name: "http.method",
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
}
|
||||
fieldCached = telemetrytypes.TelemetryFieldKey{
|
||||
Name: "db.cached",
|
||||
FieldContext: telemetrytypes.FieldContextAttribute,
|
||||
}
|
||||
)
|
||||
|
||||
func TestGetSpanAggregation_SpanCount(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
trace *WaterfallTrace
|
||||
field telemetrytypes.TelemetryFieldKey
|
||||
want map[string]uint64
|
||||
}{
|
||||
{
|
||||
name: "counts by resource field",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("s1", map[string]string{"service.name": "frontend"}, nil, 0, 10),
|
||||
mkASpan("s2", map[string]string{"service.name": "frontend"}, nil, 10, 5),
|
||||
mkASpan("s3", map[string]string{"service.name": "backend"}, nil, 20, 8),
|
||||
),
|
||||
field: fieldServiceName,
|
||||
want: map[string]uint64{"frontend": 2, "backend": 1},
|
||||
},
|
||||
{
|
||||
name: "counts by string attribute field",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("s1", nil, map[string]any{"http.method": "GET"}, 0, 10),
|
||||
mkASpan("s2", nil, map[string]any{"http.method": "POST"}, 10, 5),
|
||||
mkASpan("s3", nil, map[string]any{"http.method": "GET"}, 20, 8),
|
||||
),
|
||||
field: fieldHTTPMethod,
|
||||
want: map[string]uint64{"GET": 2, "POST": 1},
|
||||
},
|
||||
{
|
||||
name: "counts by boolean attribute field",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("s1", nil, map[string]any{"db.cached": true}, 0, 10),
|
||||
mkASpan("s2", nil, map[string]any{"db.cached": false}, 10, 5),
|
||||
mkASpan("s3", nil, map[string]any{"db.cached": true}, 20, 8),
|
||||
),
|
||||
field: fieldCached,
|
||||
want: map[string]uint64{"true": 2, "false": 1},
|
||||
},
|
||||
{
|
||||
name: "spans missing the field are excluded",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("s1", map[string]string{"service.name": "frontend"}, nil, 0, 10),
|
||||
mkASpan("s2", map[string]string{}, nil, 10, 5), // no service.name
|
||||
mkASpan("s3", map[string]string{"service.name": "backend"}, nil, 20, 8),
|
||||
),
|
||||
field: fieldServiceName,
|
||||
want: map[string]uint64{"frontend": 1, "backend": 1},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := tc.trace.GetSpanAggregation(SpanAggregationSpanCount, tc.field)
|
||||
assert.Equal(t, tc.field, result.Field)
|
||||
assert.Equal(t, SpanAggregationSpanCount, result.Aggregation)
|
||||
assert.Equal(t, tc.want, result.Value)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSpanAggregation_Duration(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
trace *WaterfallTrace
|
||||
field telemetrytypes.TelemetryFieldKey
|
||||
want map[string]uint64
|
||||
}{
|
||||
{
|
||||
name: "non-overlapping spans — merged equals sum",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("s1", map[string]string{"service.name": "frontend"}, nil, 0, 100),
|
||||
mkASpan("s2", map[string]string{"service.name": "frontend"}, nil, 100, 50),
|
||||
mkASpan("s3", map[string]string{"service.name": "backend"}, nil, 0, 80),
|
||||
),
|
||||
field: fieldServiceName,
|
||||
want: map[string]uint64{"frontend": 150, "backend": 80},
|
||||
},
|
||||
{
|
||||
name: "non-overlapping attribute groups — merged equals sum",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("s1", nil, map[string]any{"http.method": "GET"}, 0, 30),
|
||||
mkASpan("s2", nil, map[string]any{"http.method": "GET"}, 50, 20),
|
||||
mkASpan("s3", nil, map[string]any{"http.method": "POST"}, 0, 70),
|
||||
),
|
||||
field: fieldHTTPMethod,
|
||||
want: map[string]uint64{"GET": 50, "POST": 70},
|
||||
},
|
||||
{
|
||||
name: "overlapping spans — non-overlapping interval merge",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("s1", map[string]string{"service.name": "svc"}, nil, 0, 10),
|
||||
mkASpan("s2", map[string]string{"service.name": "svc"}, nil, 5, 10),
|
||||
),
|
||||
field: fieldServiceName,
|
||||
want: map[string]uint64{"svc": 15}, // [0,10] ∪ [5,15] = [0,15]
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := tc.trace.GetSpanAggregation(SpanAggregationDuration, tc.field)
|
||||
assert.Equal(t, tc.field, result.Field)
|
||||
assert.Equal(t, SpanAggregationDuration, result.Aggregation)
|
||||
assert.Equal(t, tc.want, result.Value)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSpanAggregation_ExecutionTimePercentage(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
trace *WaterfallTrace
|
||||
field telemetrytypes.TelemetryFieldKey
|
||||
want map[string]uint64
|
||||
}{
|
||||
{
|
||||
// trace [0,30]: svc occupies [0,10]+[20,30]=20 → 20*100/30 = 66%
|
||||
name: "non-overlapping spans",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("s1", map[string]string{"service.name": "svc"}, nil, 0, 10),
|
||||
mkASpan("s2", map[string]string{"service.name": "svc"}, nil, 20, 10),
|
||||
),
|
||||
field: fieldServiceName,
|
||||
want: map[string]uint64{"svc": 66},
|
||||
},
|
||||
{
|
||||
// trace [0,15]: svc [0,15]=15 → 100%
|
||||
name: "partially overlapping spans",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("s1", map[string]string{"service.name": "svc"}, nil, 0, 10),
|
||||
mkASpan("s2", map[string]string{"service.name": "svc"}, nil, 5, 10),
|
||||
),
|
||||
field: fieldServiceName,
|
||||
want: map[string]uint64{"svc": 100},
|
||||
},
|
||||
{
|
||||
// trace [0,20]: outer absorbs inner → 100%
|
||||
name: "fully contained span",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("outer", map[string]string{"service.name": "svc"}, nil, 0, 20),
|
||||
mkASpan("inner", map[string]string{"service.name": "svc"}, nil, 5, 5),
|
||||
),
|
||||
field: fieldServiceName,
|
||||
want: map[string]uint64{"svc": 100},
|
||||
},
|
||||
{
|
||||
// trace [0,30]: svc [0,15]+[20,30]=25 → 25*100/30 = 83%
|
||||
name: "three spans with two merges",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("s1", map[string]string{"service.name": "svc"}, nil, 0, 10),
|
||||
mkASpan("s2", map[string]string{"service.name": "svc"}, nil, 5, 10),
|
||||
mkASpan("s3", map[string]string{"service.name": "svc"}, nil, 20, 10),
|
||||
),
|
||||
field: fieldServiceName,
|
||||
want: map[string]uint64{"svc": 83},
|
||||
},
|
||||
{
|
||||
// trace [0,28]: frontend [0,15]=15 → 53%, backend [0,5]+[20,28]=13 → 46%
|
||||
name: "independent groups are computed separately",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("a1", map[string]string{"service.name": "frontend"}, nil, 0, 10),
|
||||
mkASpan("a2", map[string]string{"service.name": "frontend"}, nil, 5, 10),
|
||||
mkASpan("b1", map[string]string{"service.name": "backend"}, nil, 0, 5),
|
||||
mkASpan("b2", map[string]string{"service.name": "backend"}, nil, 20, 8),
|
||||
),
|
||||
field: fieldServiceName,
|
||||
want: map[string]uint64{"frontend": 53, "backend": 46},
|
||||
},
|
||||
{
|
||||
// trace [100,150]: svc [100,150]=50 → 100%
|
||||
name: "single span",
|
||||
trace: buildTraceFromSpans(
|
||||
mkASpan("s1", map[string]string{"service.name": "svc"}, nil, 100, 50),
|
||||
),
|
||||
field: fieldServiceName,
|
||||
want: map[string]uint64{"svc": 100},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := tc.trace.GetSpanAggregation(SpanAggregationExecutionTimePercentage, tc.field)
|
||||
assert.Equal(t, tc.field, result.Field)
|
||||
assert.Equal(t, SpanAggregationExecutionTimePercentage, result.Aggregation)
|
||||
assert.Equal(t, tc.want, result.Value)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2,13 +2,11 @@ package tracedetailtypes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"maps"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -23,27 +21,9 @@ var ErrTraceNotFound = errors.NewNotFoundf(errors.CodeNotFound, "trace not found
|
||||
|
||||
// PostableWaterfall is the request body for the v3 waterfall API.
|
||||
type PostableWaterfall struct {
|
||||
SelectedSpanID string `json:"selectedSpanId"`
|
||||
UncollapsedSpans []string `json:"uncollapsedSpans"`
|
||||
Limit uint `json:"limit"`
|
||||
Aggregations []SpanAggregation `json:"aggregations"`
|
||||
}
|
||||
|
||||
func (p *PostableWaterfall) Validate() error {
|
||||
if len(p.Aggregations) > maxAggregationItems {
|
||||
return ErrTooManyAggregationItems
|
||||
}
|
||||
for _, a := range p.Aggregations {
|
||||
if !a.Aggregation.isValid() {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "unknown aggregation type: %q", a.Aggregation)
|
||||
}
|
||||
fc := a.Field.FieldContext
|
||||
if fc != telemetrytypes.FieldContextResource && fc != telemetrytypes.FieldContextAttribute {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "aggregation field context must be %q or %q, got %q",
|
||||
telemetrytypes.FieldContextResource, telemetrytypes.FieldContextAttribute, fc)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
SelectedSpanID string `json:"selectedSpanId"`
|
||||
UncollapsedSpans []string `json:"uncollapsedSpans"`
|
||||
Limit uint `json:"limit"`
|
||||
}
|
||||
|
||||
// Event represents a span event.
|
||||
@@ -180,24 +160,7 @@ func (ws *WaterfallSpan) GetSubtreeNodeCount() uint64 {
|
||||
return count
|
||||
}
|
||||
|
||||
// FieldValue returns the string representation of field's value on this span for grouping.
|
||||
// The bool reports whether the field was present with a non-empty value.
|
||||
func (ws *WaterfallSpan) FieldValue(field telemetrytypes.TelemetryFieldKey) (string, bool) {
|
||||
switch field.FieldContext {
|
||||
case telemetrytypes.FieldContextResource:
|
||||
v := ws.Resource[field.Name]
|
||||
return v, v != ""
|
||||
case telemetrytypes.FieldContextAttribute:
|
||||
v, ok := ws.Attributes[field.Name]
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
str := fmt.Sprintf("%v", v)
|
||||
return str, str != ""
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// getPreOrderedSpans returns spans in pre-order, uncollapsedSpanIDs must be pre-computed.
|
||||
func (ws *WaterfallSpan) getPreOrderedSpans(uncollapsedSpanIDs map[string]struct{}, selectAll bool, level uint64) []*WaterfallSpan {
|
||||
result := []*WaterfallSpan{ws.GetWithoutChildren(level)}
|
||||
_, isUncollapsed := uncollapsedSpanIDs[ws.SpanID]
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/types/cachetypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
type TraceSummary struct {
|
||||
@@ -32,19 +31,17 @@ type WaterfallTrace struct {
|
||||
|
||||
// GettableWaterfallTrace is the response for the v3 waterfall API.
|
||||
type GettableWaterfallTrace struct {
|
||||
StartTimestampMillis uint64 `json:"startTimestampMillis"`
|
||||
EndTimestampMillis uint64 `json:"endTimestampMillis"`
|
||||
RootServiceName string `json:"rootServiceName"`
|
||||
RootServiceEntryPoint string `json:"rootServiceEntryPoint"`
|
||||
TotalSpansCount uint64 `json:"totalSpansCount"`
|
||||
TotalErrorSpansCount uint64 `json:"totalErrorSpansCount"`
|
||||
// Deprecated: use Aggregations with SpanAggregationExecutionTimePercentage on the service.name field instead.
|
||||
ServiceNameToTotalDurationMap map[string]uint64 `json:"serviceNameToTotalDurationMap"`
|
||||
Spans []*WaterfallSpan `json:"spans"`
|
||||
HasMissingSpans bool `json:"hasMissingSpans"`
|
||||
UncollapsedSpans []string `json:"uncollapsedSpans"`
|
||||
HasMore bool `json:"hasMore"`
|
||||
Aggregations []SpanAggregationResult `json:"aggregations"`
|
||||
StartTimestampMillis uint64 `json:"startTimestampMillis"`
|
||||
EndTimestampMillis uint64 `json:"endTimestampMillis"`
|
||||
RootServiceName string `json:"rootServiceName"`
|
||||
RootServiceEntryPoint string `json:"rootServiceEntryPoint"`
|
||||
TotalSpansCount uint64 `json:"totalSpansCount"`
|
||||
TotalErrorSpansCount uint64 `json:"totalErrorSpansCount"`
|
||||
ServiceNameToTotalDurationMap map[string]uint64 `json:"serviceNameToTotalDurationMap"`
|
||||
Spans []*WaterfallSpan `json:"spans"`
|
||||
HasMissingSpans bool `json:"hasMissingSpans"`
|
||||
UncollapsedSpans []string `json:"uncollapsedSpans"`
|
||||
HasMore bool `json:"hasMore"`
|
||||
}
|
||||
|
||||
// NewWaterfallTrace constructs a WaterfallTrace from processed span data.
|
||||
@@ -243,13 +240,12 @@ func (wt *WaterfallTrace) UnmarshalBinary(data []byte) error {
|
||||
return json.Unmarshal(data, wt)
|
||||
}
|
||||
|
||||
// NewGettableWaterfallTrace constructs a GettableWaterfallTrace from processed trace data and selected spans.
|
||||
// NewGettableWaterfallTrace constructs a WaterfallResponse from processed trace data and selected spans.
|
||||
func NewGettableWaterfallTrace(
|
||||
traceData *WaterfallTrace,
|
||||
selectedSpans []*WaterfallSpan,
|
||||
uncollapsedSpans []string,
|
||||
selectAllSpans bool,
|
||||
aggregations []SpanAggregationResult,
|
||||
) *GettableWaterfallTrace {
|
||||
var rootServiceName, rootServiceEntryPoint string
|
||||
if len(traceData.TraceRoots) > 0 {
|
||||
@@ -267,15 +263,6 @@ func NewGettableWaterfallTrace(
|
||||
span.TimeUnixNano = span.TimeUnixNano / 1_000_000
|
||||
}
|
||||
|
||||
// duration values are in nanoseconds; convert in-place to milliseconds.
|
||||
for i := range aggregations {
|
||||
if aggregations[i].Aggregation == SpanAggregationDuration {
|
||||
for k, v := range aggregations[i].Value {
|
||||
aggregations[i].Value[k] = v / 1_000_000
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &GettableWaterfallTrace{
|
||||
Spans: selectedSpans,
|
||||
UncollapsedSpans: uncollapsedSpans,
|
||||
@@ -288,7 +275,6 @@ func NewGettableWaterfallTrace(
|
||||
ServiceNameToTotalDurationMap: serviceDurationsMillis,
|
||||
HasMissingSpans: traceData.HasMissingSpans,
|
||||
HasMore: !selectAllSpans,
|
||||
Aggregations: aggregations,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -321,82 +307,29 @@ func calculateServiceTime(spanIDToSpanNodeMap map[string]*WaterfallSpan) map[str
|
||||
|
||||
totalTimes := make(map[string]uint64)
|
||||
for service, spans := range serviceSpans {
|
||||
totalTimes[service] = mergeSpanIntervals(spans)
|
||||
sort.Slice(spans, func(i, j int) bool {
|
||||
return spans[i].TimeUnixNano < spans[j].TimeUnixNano
|
||||
})
|
||||
|
||||
currentStart := spans[0].TimeUnixNano
|
||||
currentEnd := currentStart + spans[0].DurationNano
|
||||
total := uint64(0)
|
||||
|
||||
for _, span := range spans[1:] {
|
||||
startNano := span.TimeUnixNano
|
||||
endNano := startNano + span.DurationNano
|
||||
if currentEnd >= startNano {
|
||||
if endNano > currentEnd {
|
||||
currentEnd = endNano
|
||||
}
|
||||
} else {
|
||||
total += currentEnd - currentStart
|
||||
currentStart = startNano
|
||||
currentEnd = endNano
|
||||
}
|
||||
}
|
||||
total += currentEnd - currentStart
|
||||
totalTimes[service] = total
|
||||
}
|
||||
return totalTimes
|
||||
}
|
||||
|
||||
// mergeSpanIntervals computes non-overlapping execution time for a set of spans.
|
||||
func mergeSpanIntervals(spans []*WaterfallSpan) uint64 {
|
||||
if len(spans) == 0 {
|
||||
return 0
|
||||
}
|
||||
sort.Slice(spans, func(i, j int) bool {
|
||||
return spans[i].TimeUnixNano < spans[j].TimeUnixNano
|
||||
})
|
||||
|
||||
currentStart := spans[0].TimeUnixNano
|
||||
currentEnd := currentStart + spans[0].DurationNano
|
||||
total := uint64(0)
|
||||
|
||||
for _, span := range spans[1:] {
|
||||
startNano := span.TimeUnixNano
|
||||
endNano := startNano + span.DurationNano
|
||||
if currentEnd >= startNano {
|
||||
if endNano > currentEnd {
|
||||
currentEnd = endNano
|
||||
}
|
||||
} else {
|
||||
total += currentEnd - currentStart
|
||||
currentStart = startNano
|
||||
currentEnd = endNano
|
||||
}
|
||||
}
|
||||
return total + (currentEnd - currentStart)
|
||||
}
|
||||
|
||||
// GetSpanAggregation computes one aggregation result over all spans in the trace.
|
||||
// Duration values are returned in nanoseconds; callers convert to milliseconds as needed.
|
||||
func (wt *WaterfallTrace) GetSpanAggregation(aggregation SpanAggregationType, field telemetrytypes.TelemetryFieldKey) SpanAggregationResult {
|
||||
result := SpanAggregationResult{
|
||||
Field: field,
|
||||
Aggregation: aggregation,
|
||||
Value: make(map[string]uint64),
|
||||
}
|
||||
|
||||
switch aggregation {
|
||||
case SpanAggregationSpanCount:
|
||||
for _, span := range wt.SpanIDToSpanNodeMap {
|
||||
if key, ok := span.FieldValue(field); ok {
|
||||
result.Value[key]++
|
||||
}
|
||||
}
|
||||
|
||||
case SpanAggregationDuration:
|
||||
spansByField := make(map[string][]*WaterfallSpan)
|
||||
for _, span := range wt.SpanIDToSpanNodeMap {
|
||||
if key, ok := span.FieldValue(field); ok {
|
||||
spansByField[key] = append(spansByField[key], span)
|
||||
}
|
||||
}
|
||||
for key, spans := range spansByField {
|
||||
result.Value[key] = mergeSpanIntervals(spans)
|
||||
}
|
||||
|
||||
case SpanAggregationExecutionTimePercentage:
|
||||
traceDuration := wt.EndTime - wt.StartTime
|
||||
spansByField := make(map[string][]*WaterfallSpan)
|
||||
for _, span := range wt.SpanIDToSpanNodeMap {
|
||||
if key, ok := span.FieldValue(field); ok {
|
||||
spansByField[key] = append(spansByField[key], span)
|
||||
}
|
||||
}
|
||||
if traceDuration > 0 {
|
||||
for key, spans := range spansByField {
|
||||
result.Value[key] = mergeSpanIntervals(spans) * 100 / traceDuration
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -481,25 +481,24 @@ def test_traces_list(
|
||||
"name": "A",
|
||||
"signal": "traces",
|
||||
"disabled": False,
|
||||
"selectFields": [
|
||||
{"name": "span_id"},
|
||||
{"name": "span.timestamp"},
|
||||
{"name": "trace_id"},
|
||||
],
|
||||
"order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
|
||||
"limit": 1,
|
||||
},
|
||||
},
|
||||
HTTPStatus.OK,
|
||||
lambda x: [
|
||||
x[3].duration_nano,
|
||||
x[3].name,
|
||||
x[3].response_status_code,
|
||||
x[3].service_name,
|
||||
x[3].span_id,
|
||||
format_timestamp(x[3].timestamp),
|
||||
x[3].trace_id,
|
||||
], # type: Callable[[List[Traces]], List[Any]]
|
||||
),
|
||||
# Case 2: order by attribute timestamp field which is there in attributes as well
|
||||
# This should break but it doesn't because attribute.timestamp gets adjusted to timestamp
|
||||
# because of default trace.timestamp gets added by default and bug in field mapper picks
|
||||
# intrinsic field
|
||||
# attribute.timestamp gets adjusted to span.timestamp
|
||||
pytest.param(
|
||||
{
|
||||
"type": "builder_query",
|
||||
@@ -507,16 +506,19 @@ def test_traces_list(
|
||||
"name": "A",
|
||||
"signal": "traces",
|
||||
"disabled": False,
|
||||
"order": [{"key": {"name": "attribute.timestamp"}, "direction": "desc"}],
|
||||
"selectFields": [
|
||||
{"name": "span_id"},
|
||||
{"name": "span.timestamp"},
|
||||
{"name": "trace_id"},
|
||||
],
|
||||
"order": [
|
||||
{"key": {"name": "attribute.timestamp"}, "direction": "desc"}
|
||||
],
|
||||
"limit": 1,
|
||||
},
|
||||
},
|
||||
HTTPStatus.OK,
|
||||
lambda x: [
|
||||
x[3].duration_nano,
|
||||
x[3].name,
|
||||
x[3].response_status_code,
|
||||
x[3].service_name,
|
||||
x[3].span_id,
|
||||
format_timestamp(x[3].timestamp),
|
||||
x[3].trace_id,
|
||||
@@ -542,7 +544,7 @@ def test_traces_list(
|
||||
], # type: Callable[[List[Traces]], List[Any]]
|
||||
),
|
||||
# Case 4: select attribute.timestamp with empty order by
|
||||
# This doesn't return any data because of where_clause using aliased timestamp
|
||||
# This returns the one span which has attribute.timestamp
|
||||
pytest.param(
|
||||
{
|
||||
"type": "builder_query",
|
||||
@@ -556,7 +558,11 @@ def test_traces_list(
|
||||
},
|
||||
},
|
||||
HTTPStatus.OK,
|
||||
lambda x: [], # type: Callable[[List[Traces]], List[Any]]
|
||||
lambda x: [
|
||||
x[0].span_id,
|
||||
format_timestamp(x[0].timestamp),
|
||||
x[0].trace_id,
|
||||
], # type: Callable[[List[Traces]], List[Any]]
|
||||
),
|
||||
# Case 5: select timestamp with timestamp order by
|
||||
pytest.param(
|
||||
@@ -693,6 +699,112 @@ def test_traces_list_with_corrupt_data(
|
||||
assert data[key] == value
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "select_fields,status_code,expected_keys",
    [
        # Case 1: empty selectFields -> every intrinsic, calculated, and
        # contextual column is returned.
        pytest.param(
            [],
            HTTPStatus.OK,
            [
                # all intrinsic column
                "timestamp",
                "trace_id",
                "span_id",
                "trace_state",
                "parent_span_id",
                "flags",
                "name",
                "kind",
                "kind_string",
                "duration_nano",
                "status_code",
                "status_message",
                "status_code_string",
                "events",
                "links",
                # all calculated columns
                "response_status_code",
                "external_http_url",
                "http_url",
                "external_http_method",
                "http_method",
                "http_host",
                "db_name",
                "db_operation",
                "has_error",
                "is_remote",
                # all contextual columns (merged in response layer)
                "attributes",
                "resource",
            ],
        ),
        # Case 2: explicit selectFields -> only the requested field plus the
        # always-present identifiers.
        pytest.param(
            [
                {"name": "service.name"},
            ],
            HTTPStatus.OK,
            ["timestamp", "trace_id", "span_id", "service.name"],
        ),
    ],
)
def test_traces_list_with_select_fields(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
    select_fields: List[dict],
    status_code: HTTPStatus,
    expected_keys: List[str],
) -> None:
    """
    Setup:
        Insert 4 traces with different attributes.

    Tests:
        1. Empty select fields should return all the fields.
        2. Non empty select field should return the select field along with timestamp, trace_id and span_id.
    """
    # The span payload itself is irrelevant to this test — only the returned
    # column set matters — so any generator works here.
    traces = generate_traces_with_corrupt_metadata()
    insert_traces(traces)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    query = {
        "type": "builder_query",
        "spec": {
            "name": "A",
            "signal": "traces",
            "selectFields": select_fields,
            "order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
            "limit": 1,
        },
    }

    # Query the last five minutes, which covers the rows just inserted.
    response = make_query_request(
        signoz,
        token,
        start_ms=int(
            (datetime.now(tz=UTC) - timedelta(minutes=5)).timestamp() * 1000
        ),
        end_ms=int(datetime.now(tz=UTC).timestamp() * 1000),
        request_type="raw",
        queries=[query],
    )
    assert response.status_code == status_code

    if response.status_code == HTTPStatus.OK:
        body = response.json()
        row_keys = body["data"]["data"]["results"][0]["rows"][0]["data"].keys()
        # Compare both cardinality and membership so extra, missing, or
        # duplicated expected keys are all caught.
        assert len(row_keys) == len(expected_keys)
        assert set(row_keys) == set(expected_keys)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"order_by,aggregation_alias,expected_status",
|
||||
[
|
||||
|
||||
Reference in New Issue
Block a user