Compare commits

..

1 Commit

Author: eKuG
SHA1: cdeb0af23d
Message: feat: added span_count using the MV and index v3 traces table
Date: 2024-12-19 18:14:41 +05:30
12 changed files with 125 additions and 116 deletions

View File

@@ -150,8 +150,7 @@ services:
command:
[
"-config=/root/config/prometheus.yml",
-"--use-logs-new-schema=true",
-"--use-trace-new-schema=true"
+"--use-logs-new-schema=true"
]
# ports:
# - "6060:6060" # pprof port

View File

@@ -110,7 +110,6 @@ exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
-use_new_schema: true
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/signoz_metrics
resource_to_telemetry_conversion:

View File

@@ -25,8 +25,7 @@ services:
command:
[
"-config=/root/config/prometheus.yml",
-"--use-logs-new-schema=true",
-"--use-trace-new-schema=true"
+"--use-logs-new-schema=true"
]
ports:
- "6060:6060"

View File

@@ -167,8 +167,7 @@ services:
command:
[
"-config=/root/config/prometheus.yml",
-"--use-logs-new-schema=true",
-"--use-trace-new-schema=true"
+"--use-logs-new-schema=true"
]
# ports:
# - "6060:6060" # pprof port

View File

@@ -173,8 +173,7 @@ services:
[
"-config=/root/config/prometheus.yml",
"-gateway-url=https://api.staging.signoz.cloud",
-"--use-logs-new-schema=true",
-"--use-trace-new-schema=true"
+"--use-logs-new-schema=true"
]
# ports:
# - "6060:6060" # pprof port

View File

@@ -119,7 +119,6 @@ exporters:
clickhousetraces:
datasource: tcp://clickhouse:9000/signoz_traces
low_cardinal_exception_grouping: ${env:LOW_CARDINAL_EXCEPTION_GROUPING}
-use_new_schema: true
clickhousemetricswrite:
endpoint: tcp://clickhouse:9000/signoz_metrics
resource_to_telemetry_conversion:

View File

@@ -9,25 +9,25 @@ func generateConsumerSQL(start, end int64, topic, partition, consumerGroup, queu
query := fmt.Sprintf(`
WITH consumer_query AS (
SELECT
-resource_string_service$$name,
+serviceName,
quantile(0.99)(durationNano) / 1000000 AS p99,
COUNT(*) AS total_requests,
-sumIf(1, status_code = 2) AS error_count,
-avg(CASE WHEN has(attributes_number, 'messaging.message.body.size') THEN attributes_number['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size
-FROM signoz_traces.distributed_signoz_index_v3
+sumIf(1, statusCode = 2) AS error_count,
+avg(CASE WHEN has(numberTagMap, 'messaging.message.body.size') THEN numberTagMap['messaging.message.body.size'] ELSE NULL END) AS avg_msg_size
+FROM signoz_traces.distributed_signoz_index_v2
WHERE
timestamp >= '%d'
AND timestamp <= '%d'
AND kind = 5
-AND attribute_string_messaging$$system = '%s'
-AND attributes_string['messaging.destination.name'] = '%s'
-AND attributes_string['messaging.destination.partition.id'] = '%s'
-AND attributes_string['messaging.kafka.consumer.group'] = '%s'
-GROUP BY resource_string_service$$name
+AND msgSystem = '%s'
+AND stringTagMap['messaging.destination.name'] = '%s'
+AND stringTagMap['messaging.destination.partition.id'] = '%s'
+AND stringTagMap['messaging.kafka.consumer.group'] = '%s'
+GROUP BY serviceName
)
SELECT
-resource_string_service$$name AS service_name,
+serviceName AS service_name,
p99,
COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
COALESCE(total_requests / %d, 0) AS throughput,
@@ -35,7 +35,7 @@ SELECT
FROM
consumer_query
ORDER BY
-resource_string_service$$name;
+serviceName;
`, start, end, queueType, topic, partition, consumerGroup, timeRange)
return query
}
@@ -48,14 +48,14 @@ WITH partition_query AS (
SELECT
quantile(0.99)(durationNano) / 1000000 AS p99,
count(*) AS total_requests,
-attributes_string['messaging.destination.name'] AS topic,
-attributes_string['messaging.destination.partition.id'] AS partition
-FROM signoz_traces.distributed_signoz_index_v3
+stringTagMap['messaging.destination.name'] AS topic,
+stringTagMap['messaging.destination.partition.id'] AS partition
+FROM signoz_traces.distributed_signoz_index_v2
WHERE
timestamp >= '%d'
AND timestamp <= '%d'
AND kind = 4
-AND attribute_string_messaging$$system = '%s'
+AND msgSystem = '%s'
GROUP BY topic, partition
)
@@ -78,25 +78,25 @@ func generateConsumerPartitionLatencySQL(start, end int64, topic, partition, que
query := fmt.Sprintf(`
WITH consumer_pl AS (
SELECT
-attributes_string['messaging.kafka.consumer.group'] AS consumer_group,
-resource_string_service$$name,
+stringTagMap['messaging.kafka.consumer.group'] AS consumer_group,
+serviceName,
quantile(0.99)(durationNano) / 1000000 AS p99,
COUNT(*) AS total_requests,
-sumIf(1, status_code = 2) AS error_count
-FROM signoz_traces.distributed_signoz_index_v3
+sumIf(1, statusCode = 2) AS error_count
+FROM signoz_traces.distributed_signoz_index_v2
WHERE
timestamp >= '%d'
AND timestamp <= '%d'
AND kind = 5
-AND attribute_string_messaging$$system = '%s'
-AND attributes_string['messaging.destination.name'] = '%s'
-AND attributes_string['messaging.destination.partition.id'] = '%s'
-GROUP BY consumer_group, resource_string_service$$name
+AND msgSystem = '%s'
+AND stringTagMap['messaging.destination.name'] = '%s'
+AND stringTagMap['messaging.destination.partition.id'] = '%s'
+GROUP BY consumer_group, serviceName
)
SELECT
consumer_group,
-resource_string_service$$name AS service_name,
+serviceName AS service_name,
p99,
COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
COALESCE(total_requests / %d, 0) AS throughput
@@ -115,23 +115,23 @@ func generateProducerPartitionThroughputSQL(start, end int64, queueType string)
query := fmt.Sprintf(`
WITH producer_latency AS (
SELECT
-resource_string_service$$name,
+serviceName,
quantile(0.99)(durationNano) / 1000000 AS p99,
-attributes_string['messaging.destination.name'] AS topic,
+stringTagMap['messaging.destination.name'] AS topic,
COUNT(*) AS total_requests,
-sumIf(1, status_code = 2) AS error_count
-FROM signoz_traces.distributed_signoz_index_v3
+sumIf(1, statusCode = 2) AS error_count
+FROM signoz_traces.distributed_signoz_index_v2
WHERE
timestamp >= '%d'
AND timestamp <= '%d'
AND kind = 4
-AND attribute_string_messaging$$system = '%s'
-GROUP BY topic, resource_string_service$$name
+AND msgSystem = '%s'
+GROUP BY topic, serviceName
)
SELECT
topic,
-resource_string_service$$name AS service_name,
+serviceName AS service_name,
p99,
COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
COALESCE(total_requests / %d, 0) AS throughput
@@ -148,17 +148,17 @@ func generateProducerTopicLatencySQL(start, end int64, topic, service, queueType
WITH consumer_latency AS (
SELECT
quantile(0.99)(durationNano) / 1000000 AS p99,
-attributes_string['messaging.destination.partition.id'] AS partition,
+stringTagMap['messaging.destination.partition.id'] AS partition,
COUNT(*) AS total_requests,
-sumIf(1, status_code = 2) AS error_count
-FROM signoz_traces.distributed_signoz_index_v3
+sumIf(1, statusCode = 2) AS error_count
+FROM signoz_traces.distributed_signoz_index_v2
WHERE
timestamp >= '%d'
AND timestamp <= '%d'
AND kind = 4
-AND resource_string_service$$name = '%s'
-AND attribute_string_messaging$$system = '%s'
-AND attributes_string['messaging.destination.name'] = '%s'
+AND serviceName = '%s'
+AND msgSystem = '%s'
+AND stringTagMap['messaging.destination.name'] = '%s'
GROUP BY partition
)
@@ -179,24 +179,24 @@ func generateConsumerLatencySQL(start, end int64, queueType string) string {
query := fmt.Sprintf(`
WITH consumer_latency AS (
SELECT
-resource_string_service$$name,
-attributes_string['messaging.destination.name'] AS topic,
+serviceName,
+stringTagMap['messaging.destination.name'] AS topic,
quantile(0.99)(durationNano) / 1000000 AS p99,
COUNT(*) AS total_requests,
-sumIf(1, status_code = 2) AS error_count,
-SUM(attributes_number['messaging.message.body.size']) AS total_bytes
-FROM signoz_traces.distributed_signoz_index_v3
+sumIf(1, statusCode = 2) AS error_count,
+SUM(numberTagMap['messaging.message.body.size']) AS total_bytes
+FROM signoz_traces.distributed_signoz_index_v2
WHERE
timestamp >= '%d'
AND timestamp <= '%d'
AND kind = 5
-AND attribute_string_messaging$$system = '%s'
-GROUP BY topic, resource_string_service$$name
+AND msgSystem = '%s'
+GROUP BY topic, serviceName
)
SELECT
topic,
-resource_string_service$$name AS service_name,
+serviceName AS service_name,
p99,
COALESCE((error_count * 100.0) / total_requests, 0) AS error_rate,
COALESCE(total_requests / %d, 0) AS ingestion_rate,
@@ -216,17 +216,17 @@ func generateConsumerServiceLatencySQL(start, end int64, topic, service, queueTy
WITH consumer_latency AS (
SELECT
quantile(0.99)(durationNano) / 1000000 AS p99,
-attributes_string['messaging.destination.partition.id'] AS partition,
+stringTagMap['messaging.destination.partition.id'] AS partition,
COUNT(*) AS total_requests,
-sumIf(1, status_code = 2) AS error_count
-FROM signoz_traces.distributed_signoz_index_v3
+sumIf(1, statusCode = 2) AS error_count
+FROM signoz_traces.distributed_signoz_index_v2
WHERE
timestamp >= '%d'
AND timestamp <= '%d'
AND kind = 5
-AND resource_string_service$$name = '%s'
-AND attribute_string_messaging$$system = '%s'
-AND attributes_string['messaging.destination.name'] = '%s'
+AND serviceName = '%s'
+AND msgSystem = '%s'
+AND stringTagMap['messaging.destination.name'] = '%s'
GROUP BY partition
)
@@ -246,26 +246,26 @@ func generateProducerConsumerEvalSQL(start, end int64, queueType string, evalTim
query := fmt.Sprintf(`
WITH trace_data AS (
SELECT
-p.resource_string_service$$name AS producer_service,
-c.resource_string_service$$name AS consumer_service,
-p.trace_id,
+p.serviceName AS producer_service,
+c.serviceName AS consumer_service,
+p.traceID,
p.timestamp AS producer_timestamp,
c.timestamp AS consumer_timestamp,
p.durationNano AS durationNano,
(toUnixTimestamp64Nano(c.timestamp) - toUnixTimestamp64Nano(p.timestamp)) + p.durationNano AS time_difference
FROM
-signoz_traces.distributed_signoz_index_v3 p
+signoz_traces.distributed_signoz_index_v2 p
INNER JOIN
-signoz_traces.distributed_signoz_index_v3 c
-ON p.trace_id = c.trace_id
-AND c.parent_span_id = p.span_id
+signoz_traces.distributed_signoz_index_v2 c
+ON p.traceID = c.traceID
+AND c.parentSpanID = p.spanID
WHERE
p.kind = 4
AND c.kind = 5
AND toUnixTimestamp64Nano(p.timestamp) BETWEEN '%d' AND '%d'
AND toUnixTimestamp64Nano(c.timestamp) BETWEEN '%d' AND '%d'
-AND c.attribute_string_messaging$$system = '%s'
-AND p.attribute_string_messaging$$system = '%s'
+AND c.msgSystem = '%s'
+AND p.msgSystem = '%s'
)
SELECT
@@ -278,7 +278,7 @@ SELECT
arrayMap(x -> x.1,
arraySort(
x -> -x.2,
-groupArrayIf((trace_id, time_difference), time_difference > '%d')
+groupArrayIf((traceID, time_difference), time_difference > '%d')
)
),
1, 10
@@ -296,30 +296,30 @@ func generateProducerSQL(start, end int64, topic, partition, queueType string) s
query := fmt.Sprintf(`
WITH producer_query AS (
SELECT
-resource_string_service$$name,
+serviceName,
quantile(0.99)(durationNano) / 1000000 AS p99,
count(*) AS total_count,
-sumIf(1, status_code = 2) AS error_count
-FROM signoz_traces.distributed_signoz_index_v3
+sumIf(1, statusCode = 2) AS error_count
+FROM signoz_traces.distributed_signoz_index_v2
WHERE
timestamp >= '%d'
AND timestamp <= '%d'
AND kind = 4
-AND attribute_string_messaging$$system = '%s'
-AND attributes_string['messaging.destination.name'] = '%s'
-AND attributes_string['messaging.destination.partition.id'] = '%s'
-GROUP BY resource_string_service$$name
+AND msgSystem = '%s'
+AND stringTagMap['messaging.destination.name'] = '%s'
+AND stringTagMap['messaging.destination.partition.id'] = '%s'
+GROUP BY serviceName
)
SELECT
-resource_string_service$$name AS service_name,
+serviceName AS service_name,
p99,
COALESCE((error_count * 100.0) / total_count, 0) AS error_percentage,
COALESCE(total_count / %d, 0) AS throughput
FROM
producer_query
ORDER BY
-resource_string_service$$name;
+serviceName;
`, start, end, queueType, topic, partition, timeRange)
return query
}
@@ -328,18 +328,18 @@ func generateNetworkLatencyThroughputSQL(start, end int64, consumerGroup, partit
timeRange := (end - start) / 1000000000
query := fmt.Sprintf(`
SELECT
-attributes_string['messaging.client_id'] AS client_id,
-attributes_string['service.instance.id'] AS service_instance_id,
-resource_string_service$$name AS service_name,
+stringTagMap['messaging.client_id'] AS client_id,
+stringTagMap['service.instance.id'] AS service_instance_id,
+serviceName AS service_name,
count(*) / %d AS throughput
-FROM signoz_traces.distributed_signoz_index_v3
+FROM signoz_traces.distributed_signoz_index_v2
WHERE
timestamp >= '%d'
AND timestamp <= '%d'
AND kind = 5
-AND attribute_string_messaging$$system = '%s'
-AND attributes_string['messaging.kafka.consumer.group'] = '%s'
-AND attributes_string['messaging.destination.partition.id'] = '%s'
+AND msgSystem = '%s'
+AND stringTagMap['messaging.kafka.consumer.group'] = '%s'
+AND stringTagMap['messaging.destination.partition.id'] = '%s'
GROUP BY service_name, client_id, service_instance_id
ORDER BY throughput DESC
`, timeRange, start, end, queueType, consumerGroup, partitionID)
@@ -350,12 +350,12 @@ func onboardProducersSQL(start, end int64, queueType string) string {
query := fmt.Sprintf(`
SELECT
COUNT(*) = 0 AS entries,
-COUNT(IF(attribute_string_messaging$$system = '%s', 1, NULL)) = 0 AS queue,
+COUNT(IF(msgSystem = '%s', 1, NULL)) = 0 AS queue,
COUNT(IF(kind = 4, 1, NULL)) = 0 AS kind,
-COUNT(IF(has(attributes_string, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
-COUNT(IF(has(attributes_string, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition
+COUNT(IF(has(stringTagMap, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
+COUNT(IF(has(stringTagMap, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition
FROM
-signoz_traces.distributed_signoz_index_v3
+signoz_traces.distributed_signoz_index_v2
WHERE
timestamp >= '%d'
AND timestamp <= '%d';`, queueType, start, end)
@@ -366,16 +366,16 @@ func onboardConsumerSQL(start, end int64, queueType string) string {
query := fmt.Sprintf(`
SELECT
COUNT(*) = 0 AS entries,
-COUNT(IF(attribute_string_messaging$$system = '%s', 1, NULL)) = 0 AS queue,
+COUNT(IF(msgSystem = '%s', 1, NULL)) = 0 AS queue,
COUNT(IF(kind = 5, 1, NULL)) = 0 AS kind,
-COUNT(resource_string_service$$name) = 0 AS svc,
-COUNT(IF(has(attributes_string, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
-COUNT(IF(has(attributes_string, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition,
-COUNT(IF(has(attributes_string, 'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup,
-COUNT(IF(has(attributes_number, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize,
-COUNT(IF(has(attributes_string, 'messaging.client_id'), 1, NULL)) = 0 AS clientid,
-COUNT(IF(has(attributes_string, 'service.instance.id'), 1, NULL)) = 0 AS instanceid
-FROM signoz_traces.distributed_signoz_index_v3
+COUNT(serviceName) = 0 AS svc,
+COUNT(IF(has(stringTagMap, 'messaging.destination.name'), 1, NULL)) = 0 AS destination,
+COUNT(IF(has(stringTagMap, 'messaging.destination.partition.id'), 1, NULL)) = 0 AS partition,
+COUNT(IF(has(stringTagMap, 'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup,
+COUNT(IF(has(numberTagMap, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize,
+COUNT(IF(has(stringTagMap, 'messaging.client_id'), 1, NULL)) = 0 AS clientid,
+COUNT(IF(has(stringTagMap, 'service.instance.id'), 1, NULL)) = 0 AS instanceid
+FROM signoz_traces.distributed_signoz_index_v2
WHERE
timestamp >= '%d'
AND timestamp <= '%d';`, queueType, start, end)
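Note: the hunks in this file swap between the two trace schemas. A rough correspondence of the identifiers involved, inferred only from this diff (an illustrative Go sketch, not an authoritative or exhaustive mapping):

// Column/table renames visible in the hunks above (left side removed, right side added).
var newToOldTraceColumns = map[string]string{
    "distributed_signoz_index_v3":        "distributed_signoz_index_v2",
    "resource_string_service$$name":      "serviceName",
    "status_code":                        "statusCode",
    "trace_id":                           "traceID",
    "span_id":                            "spanID",
    "parent_span_id":                     "parentSpanID",
    "attribute_string_messaging$$system": "msgSystem",
    "attributes_string[...]":             "stringTagMap[...]",
    "attributes_number[...]":             "numberTagMap[...]",
}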

View File

@@ -41,7 +41,7 @@ var SupportedFunctions = []string{
var EvalFuncs = map[string]govaluate.ExpressionFunction{}
-type prepareTracesQueryFunc func(start, end int64, panelType v3.PanelType, bq *v3.BuilderQuery, options v3.QBOptions) (string, error)
+type prepareTracesQueryFunc func(start, end int64, panelType v3.PanelType, bq *v3.BuilderQuery, options v3.QBOptions, sortBy string) (string, error)
type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options v3.QBOptions) (string, error)
type prepareMetricQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options metricsV3.Options) (string, error)
@@ -196,12 +196,12 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3) (map[strin
// for ts query with group by and limit form two queries
if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
limitQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType, query,
-v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
+v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}, params.SortBy)
if err != nil {
return nil, err
}
placeholderQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType,
-query, v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled})
+query, v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, PreferRPM: PreferRPMFeatureEnabled}, params.SortBy)
if err != nil {
return nil, err
}
@@ -209,7 +209,7 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3) (map[strin
queries[queryName] = query
} else {
queryString, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType,
-query, v3.QBOptions{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""})
+query, v3.QBOptions{PreferRPM: PreferRPMFeatureEnabled, GraphLimitQtype: ""}, params.SortBy)
if err != nil {
return nil, err
}

View File

@@ -499,7 +499,7 @@ func AddOffsetToQuery(query string, offset uint64) string {
// PrepareTracesQuery returns the query string for traces
// start and end are in epoch millisecond
// step is in seconds
-func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.QBOptions) (string, error) {
+func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.QBOptions, sortBy string) (string, error) {
// adjust the start and end time to the step interval
if panelType == v3.PanelTypeGraph {
// adjust the start and end time to the step interval for graph panel types

View File

@@ -200,7 +200,7 @@ func orderByAttributeKeyTags(panelType v3.PanelType, items []v3.OrderBy, tags []
return str
}
-func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.PanelType, options v3.QBOptions) (string, error) {
+func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.PanelType, options v3.QBOptions, sortBy string) (string, error) {
tracesStart := utils.GetEpochNanoSecs(start)
tracesEnd := utils.GetEpochNanoSecs(end)
@@ -249,12 +249,21 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
if mq.AggregateOperator == v3.AggregateOperatorNoOp {
var query string
if panelType == v3.PanelTypeTrace {
-withSubQuery := fmt.Sprintf(constants.TracesExplorerViewSQLSelectWithSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3_LOCAL_TABLENAME, timeFilter, filterSubQuery)
-withSubQuery = tracesV3.AddLimitToQuery(withSubQuery, mq.Limit)
-if mq.Offset != 0 {
-withSubQuery = tracesV3.AddOffsetToQuery(withSubQuery, mq.Offset)
+if sortBy == constants.TraceSortBySpanCount {
+spanCountSubQuery := fmt.Sprintf(constants.TraceExplorerSpanCountSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_TRACE_SPAN_COUNT_MV, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3_LOCAL_TABLENAME, timeFilter, filterSubQuery)
+query = tracesV3.AddLimitToQuery(spanCountSubQuery, mq.Limit)
+if mq.Offset != 0 {
+query = tracesV3.AddOffsetToQuery(query, mq.Offset)
+}
+} else {
+withSubQuery := fmt.Sprintf(constants.TracesExplorerViewSQLSelectWithSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3_LOCAL_TABLENAME, timeFilter, filterSubQuery)
+withSubQuery = tracesV3.AddLimitToQuery(withSubQuery, mq.Limit)
+if mq.Offset != 0 {
+withSubQuery = tracesV3.AddOffsetToQuery(withSubQuery, mq.Offset)
+}
+query = fmt.Sprintf(constants.TracesExplorerViewSQLSelectBeforeSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3) + withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectAfterSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3, timeFilter)
+}
-query = fmt.Sprintf(constants.TracesExplorerViewSQLSelectBeforeSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3) + withSubQuery + ") " + fmt.Sprintf(constants.TracesExplorerViewSQLSelectAfterSubQuery, constants.SIGNOZ_TRACE_DBNAME, constants.SIGNOZ_SPAN_INDEX_V3, timeFilter)
// adding this to avoid the distributed product mode error which doesn't allow global in
query += " settings distributed_product_mode='allow', max_memory_usage=10000000000"
} else if panelType == v3.PanelTypeList {
@@ -375,7 +384,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
// PrepareTracesQuery returns the query string for traces
// start and end are in epoch millisecond
// step is in seconds
-func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.QBOptions) (string, error) {
+func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.BuilderQuery, options v3.QBOptions, sortBy string) (string, error) {
// adjust the start and end time to the step interval
if panelType == v3.PanelTypeGraph {
// adjust the start and end time to the step interval for graph panel types
@@ -384,7 +393,7 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder
}
if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
// give me just the group by names
-query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options)
+query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options, sortBy)
if err != nil {
return "", err
}
@@ -392,14 +401,14 @@ func PrepareTracesQuery(start, end int64, panelType v3.PanelType, mq *v3.Builder
return query, nil
} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
-query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options)
+query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options, sortBy)
if err != nil {
return "", err
}
return query, nil
}
-query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options)
+query, err := buildTracesQuery(start, end, mq.StepInterval, mq, panelType, options, sortBy)
if err != nil {
return "", err
}
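With the extra parameter threaded through, a caller that wants the trace explorer ordered by span count passes the new constant. A minimal sketch, assuming the surrounding values (start, end, builderQuery) and package qualifiers used elsewhere in this diff:

// Hypothetical call site: build the trace explorer query sorted by span count.
queryStr, err := PrepareTracesQuery(start, end, v3.PanelTypeTrace, builderQuery,
    v3.QBOptions{}, constants.TraceSortBySpanCount)
if err != nil {
    // handle error
}
_ = queryStr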

View File

@@ -53,6 +53,8 @@ const LogsTTL = "logs"
const DurationSort = "DurationSort"
const TimestampSort = "TimestampSort"
const PreferRPM = "PreferRPM"
+const TraceSortBySpanCount = "span_count"
+const TraceSortByTraceDuration = "trace_duration"
func GetAlertManagerApiPrefix() string {
if os.Getenv("ALERTMANAGER_API_PREFIX") != "" {
@@ -90,7 +92,7 @@ func EnableHostsInfraMonitoring() bool {
return GetOrDefaultEnv("ENABLE_INFRA_METRICS", "true") == "true"
}
-var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "true")
+var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "false")
func IsDurationSortFeatureEnabled() bool {
isDurationSortFeatureEnabledStr := DurationSortFeature
@@ -248,6 +250,7 @@ const (
SIGNOZ_TIMESERIES_v4_1DAY_LOCAL_TABLENAME = "time_series_v4_1day"
SIGNOZ_TIMESERIES_v4_1WEEK_LOCAL_TABLENAME = "time_series_v4_1week"
SIGNOZ_TIMESERIES_v4_1DAY_TABLENAME = "distributed_time_series_v4_1day"
+SIGNOZ_TRACE_SPAN_COUNT_MV = "trace_summary_mv"
)
var TimeoutExcludedRoutes = map[string]bool{
@@ -353,6 +356,8 @@ const (
TracesExplorerViewSQLSelectQuery = "SELECT subQuery.serviceName, subQuery.name, count() AS " +
"span_count, subQuery.durationNano, traceID FROM %s.%s GLOBAL INNER JOIN subQuery ON %s.traceID = subQuery.traceID GROUP " +
"BY traceID, subQuery.durationNano, subQuery.name, subQuery.serviceName ORDER BY subQuery.durationNano desc;"
+TraceExplorerSpanCountSubQuery = "SELECT t.trace_id, t.num_spans FROM %s.%s as t GLOBAL INNER JOIN (SELECT * FROM (SELECT trace_id FROM %s.%s WHERE %s %s LIMIT 1 BY trace_id) AS inner_filtered ) AS filtered_traces ON t.trace_id = filtered_traces.trace_id ORDER BY t.num_spans DESC "
+TraceExplorerEnrichSpanCountQuery = "SELECT name, serviceName, duration_nano FROM %s.%s WHERE trace_id = %s"
)
// ReservedColumnTargetAliases identifies result value from a user
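For orientation, a sketch of how the new span-count template is filled (argument order as used in buildTracesQuery above). The database and index-table names below are assumptions chosen for illustration; only the materialized-view name "trace_summary_mv" is defined in this diff:

// Hypothetical expansion of TraceExplorerSpanCountSubQuery.
spanCountSubQuery := fmt.Sprintf(constants.TraceExplorerSpanCountSubQuery,
    "signoz_traces", "trace_summary_mv", // %s.%s -> MV carrying trace_id and num_spans
    "signoz_traces", "signoz_index_v3",  // %s.%s -> span index filtered by time/attributes
    timeFilter, filterSubQuery)

The filled query selects trace_id and num_spans from the materialized view, GLOBAL INNER JOINs it against the trace IDs that pass the time and attribute filters, and orders by num_spans DESC; TraceExplorerEnrichSpanCountQuery then fetches name, serviceName and duration_nano for each selected trace.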

View File

@@ -379,6 +379,7 @@ type QueryRangeParamsV3 struct {
NoCache bool `json:"noCache"`
Version string `json:"-"`
FormatForWeb bool `json:"formatForWeb,omitempty"`
+SortBy string `json:"sortBy,omitempty"`
}
func (q *QueryRangeParamsV3) Clone() *QueryRangeParamsV3 {
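On the request side, the new field makes span-count ordering opt-in; in the request JSON it travels as the optional "sortBy" key. A minimal sketch with every other field omitted:

// Hypothetical construction of query-range params requesting span-count order.
params := v3.QueryRangeParamsV3{
    // time range, composite query, etc. omitted for brevity
    SortBy: constants.TraceSortBySpanCount, // serializes to "sortBy": "span_count"
}
_ = params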