Compare commits


1 Commit

Author: Abhi Kumar
SHA1: a39bc094e8
Message: chore: added changes for sorting tooltip content
Date: 2026-05-15 13:18:27 +05:30
23 changed files with 49 additions and 1728 deletions

View File

@@ -190,7 +190,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.124.0
image: signoz/signoz:v0.122.0
ports:
- "8080:8080" # signoz port
# - "6060:6060" # pprof port

View File

@@ -117,7 +117,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.124.0
image: signoz/signoz:v0.122.0
ports:
- "8080:8080" # signoz port
volumes:

View File

@@ -181,7 +181,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.124.0}
image: signoz/signoz:${VERSION:-v0.122.0}
container_name: signoz
ports:
- "8080:8080" # signoz port

View File

@@ -109,7 +109,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.124.0}
image: signoz/signoz:${VERSION:-v0.122.0}
container_name: signoz
ports:
- "8080:8080" # signoz port

View File

@@ -2580,76 +2580,6 @@ components:
- requiredMetricsCheck
- endTimeBeforeRetention
type: object
InframonitoringtypesDaemonSetRecord:
properties:
currentNodes:
type: integer
daemonSetCPU:
format: double
type: number
daemonSetCPULimit:
format: double
type: number
daemonSetCPURequest:
format: double
type: number
daemonSetMemory:
format: double
type: number
daemonSetMemoryLimit:
format: double
type: number
daemonSetMemoryRequest:
format: double
type: number
daemonSetName:
type: string
desiredNodes:
type: integer
meta:
additionalProperties:
type: string
nullable: true
type: object
podCountsByPhase:
$ref: '#/components/schemas/InframonitoringtypesPodCountsByPhase'
required:
- daemonSetName
- daemonSetCPU
- daemonSetCPURequest
- daemonSetCPULimit
- daemonSetMemory
- daemonSetMemoryRequest
- daemonSetMemoryLimit
- desiredNodes
- currentNodes
- podCountsByPhase
- meta
type: object
InframonitoringtypesDaemonSets:
properties:
endTimeBeforeRetention:
type: boolean
records:
items:
$ref: '#/components/schemas/InframonitoringtypesDaemonSetRecord'
nullable: true
type: array
requiredMetricsCheck:
$ref: '#/components/schemas/InframonitoringtypesRequiredMetricsCheck'
total:
type: integer
type:
$ref: '#/components/schemas/InframonitoringtypesResponseType'
warning:
$ref: '#/components/schemas/Querybuildertypesv5QueryWarnData'
required:
- type
- records
- total
- requiredMetricsCheck
- endTimeBeforeRetention
type: object
InframonitoringtypesDeploymentRecord:
properties:
availablePods:
@@ -3126,32 +3056,6 @@ components:
- end
- limit
type: object
InframonitoringtypesPostableDaemonSets:
properties:
end:
format: int64
type: integer
filter:
$ref: '#/components/schemas/Querybuildertypesv5Filter'
groupBy:
items:
$ref: '#/components/schemas/Querybuildertypesv5GroupByKey'
nullable: true
type: array
limit:
type: integer
offset:
type: integer
orderBy:
$ref: '#/components/schemas/Querybuildertypesv5OrderBy'
start:
format: int64
type: integer
required:
- start
- end
- limit
type: object
InframonitoringtypesPostableDeployments:
properties:
end:
@@ -12371,83 +12275,6 @@ paths:
summary: List Clusters for Infra Monitoring
tags:
- inframonitoring
/api/v2/infra_monitoring/daemonsets:
post:
deprecated: false
description: 'Returns a paginated list of Kubernetes DaemonSets with key aggregated
pod metrics: CPU usage and memory working set summed across pods owned by
the daemonset, plus average CPU/memory request and limit utilization (daemonSetCPURequest,
daemonSetCPULimit, daemonSetMemoryRequest, daemonSetMemoryLimit). Each row
also reports the latest known node-level counters from kube-state-metrics:
desiredNodes (k8s.daemonset.desired_scheduled_nodes, the number of nodes the
daemonset wants to run on) and currentNodes (k8s.daemonset.current_scheduled_nodes,
the number of nodes the daemonset currently runs on) — note these are node
counts, not pod counts. It also reports per-group podCountsByPhase ({ pending,
running, succeeded, failed, unknown } from each pod''s latest k8s.pod.phase
value). Each daemonset includes metadata attributes (k8s.daemonset.name, k8s.namespace.name,
k8s.cluster.name). The response type is ''list'' for the default k8s.daemonset.name
grouping or ''grouped_list'' for custom groupBy keys; in both modes every
row aggregates pods owned by daemonsets in the group. Supports filtering via
a filter expression, custom groupBy, ordering by cpu / cpu_request / cpu_limit
/ memory / memory_request / memory_limit / desired_nodes / current_nodes,
and pagination via offset/limit. Also reports missing required metrics and
whether the requested time range falls before the data retention boundary.
Numeric metric fields (daemonSetCPU, daemonSetCPURequest, daemonSetCPULimit,
daemonSetMemory, daemonSetMemoryRequest, daemonSetMemoryLimit, desiredNodes,
currentNodes) return -1 as a sentinel when no data is available for that field.'
operationId: ListDaemonSets
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/InframonitoringtypesPostableDaemonSets'
responses:
"200":
content:
application/json:
schema:
properties:
data:
$ref: '#/components/schemas/InframonitoringtypesDaemonSets'
status:
type: string
required:
- status
- data
type: object
description: OK
"400":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Bad Request
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- VIEWER
- tokenizer:
- VIEWER
summary: List DaemonSets for Infra Monitoring
tags:
- inframonitoring
/api/v2/infra_monitoring/deployments:
post:
deprecated: false
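The description of the removed daemonsets endpoint above spells out the request fields (start/end, filter, groupBy, orderBy, offset/limit) and the -1 "no data" sentinel on the response. For context on what is being dropped, here is a minimal sketch of a request against that path; the time range, the filter expression, and the exact JSON shape of orderBy are assumptions, only the field names and the valid orderBy values come from the schema above.

// Sketch of a POST to the removed daemonsets endpoint. Field names follow the
// InframonitoringtypesPostableDaemonSets schema above; the time range, filter
// expression, orderBy JSON shape, and lack of auth handling are assumptions.
async function listDaemonSetsRaw(): Promise<unknown> {
  const body = {
    start: Date.now() - 30 * 60 * 1000, // epoch millis (int64 in the schema)
    end: Date.now(),
    filter: { expression: "k8s.namespace.name = 'kube-system'" },
    groupBy: [], // empty -> default k8s.daemonset.name grouping, response type 'list'
    orderBy: { key: { name: 'cpu' }, direction: 'desc' }, // cpu | cpu_request | ... | current_nodes
    offset: 0,
    limit: 10,
  };
  const res = await fetch('/api/v2/infra_monitoring/daemonsets', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });
  return res.json(); // { status, data: InframonitoringtypesDaemonSets }
}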

View File

@@ -14,7 +14,6 @@ import type {
import type {
InframonitoringtypesPostableClustersDTO,
InframonitoringtypesPostableDaemonSetsDTO,
InframonitoringtypesPostableDeploymentsDTO,
InframonitoringtypesPostableHostsDTO,
InframonitoringtypesPostableJobsDTO,
@@ -24,7 +23,6 @@ import type {
InframonitoringtypesPostableStatefulSetsDTO,
InframonitoringtypesPostableVolumesDTO,
ListClusters200,
ListDaemonSets200,
ListDeployments200,
ListHosts200,
ListJobs200,
@@ -122,89 +120,6 @@ export const useListClusters = <
> => {
return useMutation(getListClustersMutationOptions(options));
};
/**
* Returns a paginated list of Kubernetes DaemonSets with key aggregated pod metrics: CPU usage and memory working set summed across pods owned by the daemonset, plus average CPU/memory request and limit utilization (daemonSetCPURequest, daemonSetCPULimit, daemonSetMemoryRequest, daemonSetMemoryLimit). Each row also reports the latest known node-level counters from kube-state-metrics: desiredNodes (k8s.daemonset.desired_scheduled_nodes, the number of nodes the daemonset wants to run on) and currentNodes (k8s.daemonset.current_scheduled_nodes, the number of nodes the daemonset currently runs on) — note these are node counts, not pod counts. It also reports per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value). Each daemonset includes metadata attributes (k8s.daemonset.name, k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.daemonset.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods owned by daemonsets in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_request / cpu_limit / memory / memory_request / memory_limit / desired_nodes / current_nodes, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (daemonSetCPU, daemonSetCPURequest, daemonSetCPULimit, daemonSetMemory, daemonSetMemoryRequest, daemonSetMemoryLimit, desiredNodes, currentNodes) return -1 as a sentinel when no data is available for that field.
* @summary List DaemonSets for Infra Monitoring
*/
export const listDaemonSets = (
inframonitoringtypesPostableDaemonSetsDTO?: BodyType<InframonitoringtypesPostableDaemonSetsDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<ListDaemonSets200>({
url: `/api/v2/infra_monitoring/daemonsets`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: inframonitoringtypesPostableDaemonSetsDTO,
signal,
});
};
export const getListDaemonSetsMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listDaemonSets>>,
TError,
{ data?: BodyType<InframonitoringtypesPostableDaemonSetsDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof listDaemonSets>>,
TError,
{ data?: BodyType<InframonitoringtypesPostableDaemonSetsDTO> },
TContext
> => {
const mutationKey = ['listDaemonSets'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof listDaemonSets>>,
{ data?: BodyType<InframonitoringtypesPostableDaemonSetsDTO> }
> = (props) => {
const { data } = props ?? {};
return listDaemonSets(data);
};
return { mutationFn, ...mutationOptions };
};
export type ListDaemonSetsMutationResult = NonNullable<
Awaited<ReturnType<typeof listDaemonSets>>
>;
export type ListDaemonSetsMutationBody =
| BodyType<InframonitoringtypesPostableDaemonSetsDTO>
| undefined;
export type ListDaemonSetsMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary List DaemonSets for Infra Monitoring
*/
export const useListDaemonSets = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listDaemonSets>>,
TError,
{ data?: BodyType<InframonitoringtypesPostableDaemonSetsDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof listDaemonSets>>,
TError,
{ data?: BodyType<InframonitoringtypesPostableDaemonSetsDTO> },
TContext
> => {
return useMutation(getListDaemonSetsMutationOptions(options));
};
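As a point of reference before the hook disappears, a rough sketch of how the removed useListDaemonSets mutation would be consumed from a component; the import path, time range, and response handling are assumptions, only the hook name and the { data: InframonitoringtypesPostableDaemonSetsDTO } payload shape come from the generated code above.

// Hypothetical consumer of the removed hook. The import path and onSuccess
// handling are assumptions; the payload mirrors the generated DTO above.
import { useListDaemonSets } from './generated/inframonitoring'; // assumed path

export function DaemonSetsLoadButton() {
  const { mutate } = useListDaemonSets({
    mutation: {
      // Exact response unwrapping depends on GeneratedAPIInstance, so just log it.
      onSuccess: (response) => console.log('daemonsets', response),
    },
  });
  return (
    <button
      type="button"
      onClick={() =>
        mutate({ data: { start: Date.now() - 900_000, end: Date.now(), limit: 10 } })
      }
    >
      Load DaemonSets
    </button>
  );
}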
/**
* Returns a paginated list of Kubernetes Deployments with key aggregated pod metrics: CPU usage and memory working set summed across pods owned by the deployment, plus average CPU/memory request and limit utilization (deploymentCPURequest, deploymentCPULimit, deploymentMemoryRequest, deploymentMemoryLimit). Each row also reports the latest known desiredPods (k8s.deployment.desired) and availablePods (k8s.deployment.available) replica counts and per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value). Each deployment includes metadata attributes (k8s.deployment.name, k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.deployment.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods owned by deployments in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_request / cpu_limit / memory / memory_request / memory_limit / desired_pods / available_pods, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (deploymentCPU, deploymentCPURequest, deploymentCPULimit, deploymentMemory, deploymentMemoryRequest, deploymentMemoryLimit, desiredPods, availablePods) return -1 as a sentinel when no data is available for that field.
* @summary List Deployments for Infra Monitoring

View File

@@ -3376,84 +3376,6 @@ export interface InframonitoringtypesClustersDTO {
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
export type InframonitoringtypesDaemonSetRecordDTOMetaAnyOf = {
[key: string]: string;
};
/**
* @nullable
*/
export type InframonitoringtypesDaemonSetRecordDTOMeta =
InframonitoringtypesDaemonSetRecordDTOMetaAnyOf | null;
export interface InframonitoringtypesDaemonSetRecordDTO {
/**
* @type integer
*/
currentNodes: number;
/**
* @type number
* @format double
*/
daemonSetCPU: number;
/**
* @type number
* @format double
*/
daemonSetCPULimit: number;
/**
* @type number
* @format double
*/
daemonSetCPURequest: number;
/**
* @type number
* @format double
*/
daemonSetMemory: number;
/**
* @type number
* @format double
*/
daemonSetMemoryLimit: number;
/**
* @type number
* @format double
*/
daemonSetMemoryRequest: number;
/**
* @type string
*/
daemonSetName: string;
/**
* @type integer
*/
desiredNodes: number;
/**
* @type object,null
*/
meta: InframonitoringtypesDaemonSetRecordDTOMeta;
podCountsByPhase: InframonitoringtypesPodCountsByPhaseDTO;
}
export interface InframonitoringtypesDaemonSetsDTO {
/**
* @type boolean
*/
endTimeBeforeRetention: boolean;
/**
* @type array,null
*/
records: InframonitoringtypesDaemonSetRecordDTO[] | null;
requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
/**
* @type integer
*/
total: number;
type: InframonitoringtypesResponseTypeDTO;
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
export type InframonitoringtypesDeploymentRecordDTOMetaAnyOf = {
[key: string]: string;
};
@@ -4004,33 +3926,6 @@ export interface InframonitoringtypesPostableClustersDTO {
start: number;
}
export interface InframonitoringtypesPostableDaemonSetsDTO {
/**
* @type integer
* @format int64
*/
end: number;
filter?: Querybuildertypesv5FilterDTO;
/**
* @type array,null
*/
groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
/**
* @type integer
*/
limit: number;
/**
* @type integer
*/
offset?: number;
orderBy?: Querybuildertypesv5OrderByDTO;
/**
* @type integer
* @format int64
*/
start: number;
}
export interface InframonitoringtypesPostableDeploymentsDTO {
/**
* @type integer
@@ -8535,14 +8430,6 @@ export type ListClusters200 = {
status: string;
};
export type ListDaemonSets200 = {
data: InframonitoringtypesDaemonSetsDTO;
/**
* @type string
*/
status: string;
};
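The DTOs above, together with the endpoint description, document -1 as a "no data" sentinel for every numeric metric field. A small hedged sketch of normalizing that before display; the display strings, the byte unit for memory, and the row shape are assumptions, the field names come from InframonitoringtypesDaemonSetRecordDTO.

// Normalizes the -1 "no data" sentinel documented for the removed DTO above.
// Field names come from InframonitoringtypesDaemonSetRecordDTO; units and
// display strings are assumptions.
const orNoData = (v: number, format: (n: number) => string): string =>
  v === -1 ? 'no data' : format(v);

function toDisplayRow(record: {
  daemonSetName: string;
  daemonSetCPU: number;
  daemonSetMemory: number;
  desiredNodes: number;
  currentNodes: number;
}): { name: string; cpu: string; memory: string; nodes: string } {
  return {
    name: record.daemonSetName,
    cpu: orNoData(record.daemonSetCPU, (n) => `${n.toFixed(2)} cores`),
    // memory is assumed to be reported in bytes here
    memory: orNoData(record.daemonSetMemory, (n) => `${(n / 2 ** 20).toFixed(1)} MiB`),
    nodes:
      record.desiredNodes === -1 || record.currentNodes === -1
        ? 'no data'
        : `${record.currentNodes}/${record.desiredNodes} nodes`,
  };
}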
export type ListDeployments200 = {
data: InframonitoringtypesDeploymentsDTO;
/**

View File

@@ -189,7 +189,7 @@ describe('Tooltip utils', () => {
];
}
it('builds tooltip content in series-index order with isActive flag set correctly', () => {
it('builds tooltip content sorted by value descending with isActive flag set correctly', () => {
const data: AlignedData = [[0], [10], [20], [30]];
const series = createSeriesConfig();
const dataIndexes = [null, 0, 0, 0];
@@ -206,21 +206,21 @@ describe('Tooltip utils', () => {
});
expect(result).toHaveLength(2);
// Series are returned in series-index order (A=index 1 before B=index 2)
// Sorted by value descending: B (20) before A (10)
expect(result[0]).toMatchObject<Partial<TooltipContentItem>>({
label: 'A',
value: 10,
tooltipValue: 'formatted-10',
color: '#ff0000',
isActive: false,
});
expect(result[1]).toMatchObject<Partial<TooltipContentItem>>({
label: 'B',
value: 20,
tooltipValue: 'formatted-20',
color: 'color-2',
isActive: true,
});
expect(result[1]).toMatchObject<Partial<TooltipContentItem>>({
label: 'A',
value: 10,
tooltipValue: 'formatted-10',
color: '#ff0000',
isActive: false,
});
});
it('skips series with null data index or non-finite values', () => {
@@ -274,7 +274,7 @@ describe('Tooltip utils', () => {
expect(result[1].value).toBe(30);
});
it('returns items in series-index order', () => {
it('returns items sorted by value descending', () => {
// Series values in non-sorted order: 3, 1, 4, 2
const data: AlignedData = [[0], [3], [1], [4], [2]];
const series: Series[] = [
@@ -297,7 +297,7 @@ describe('Tooltip utils', () => {
decimalPrecision,
});
expect(result.map((item) => item.value)).toStrictEqual([3, 1, 4, 2]);
expect(result.map((item) => item.value)).toStrictEqual([4, 3, 2, 1]);
});
});
});

View File

@@ -142,5 +142,7 @@ export function buildTooltipContent({
}
}
items.sort((a, b) => b.value - a.value);
return items;
}
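The new items.sort((a, b) => b.value - a.value) call above is exactly what the updated tests assert: values [3, 1, 4, 2] come back as [4, 3, 2, 1]. A standalone sketch of the same comparator on a stripped-down item shape (TooltipContentItem carries more fields in the real code):

// Same descending comparator as buildTooltipContent's new sort, shown on a
// reduced item shape for illustration.
type MiniItem = { label: string; value: number };

const tooltipItems: MiniItem[] = [
  { label: 'A', value: 3 },
  { label: 'B', value: 1 },
  { label: 'C', value: 4 },
  { label: 'D', value: 2 },
];

// b.value - a.value puts the largest value first, so the biggest series is listed on top.
tooltipItems.sort((a, b) => b.value - a.value);

console.log(tooltipItems.map((i) => i.value)); // [4, 3, 2, 1]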

View File

@@ -181,24 +181,5 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v2/infra_monitoring/daemonsets", handler.New(
provider.authzMiddleware.ViewAccess(provider.infraMonitoringHandler.ListDaemonSets),
handler.OpenAPIDef{
ID: "ListDaemonSets",
Tags: []string{"inframonitoring"},
Summary: "List DaemonSets for Infra Monitoring",
Description: "Returns a paginated list of Kubernetes DaemonSets with key aggregated pod metrics: CPU usage and memory working set summed across pods owned by the daemonset, plus average CPU/memory request and limit utilization (daemonSetCPURequest, daemonSetCPULimit, daemonSetMemoryRequest, daemonSetMemoryLimit). Each row also reports the latest known node-level counters from kube-state-metrics: desiredNodes (k8s.daemonset.desired_scheduled_nodes, the number of nodes the daemonset wants to run on) and currentNodes (k8s.daemonset.current_scheduled_nodes, the number of nodes the daemonset currently runs on) — note these are node counts, not pod counts. It also reports per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value). Each daemonset includes metadata attributes (k8s.daemonset.name, k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.daemonset.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods owned by daemonsets in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_request / cpu_limit / memory / memory_request / memory_limit / desired_nodes / current_nodes, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (daemonSetCPU, daemonSetCPURequest, daemonSetCPULimit, daemonSetMemory, daemonSetMemoryRequest, daemonSetMemoryLimit, desiredNodes, currentNodes) return -1 as a sentinel when no data is available for that field.",
Request: new(inframonitoringtypes.PostableDaemonSets),
RequestContentType: "application/json",
Response: new(inframonitoringtypes.DaemonSets),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodPost).GetError(); err != nil {
return err
}
return nil
}

View File

@@ -1,148 +0,0 @@
package implinframonitoring
import (
"context"
"slices"
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/valuer"
)
// buildDaemonSetRecords assembles the page records. Pod phase counts come from
// phaseCounts in both modes; every row is a group of pods (one daemonset in
// list mode, an arbitrary roll-up in grouped_list mode), so there's no
// per-row "current phase" concept.
func buildDaemonSetRecords(
resp *qbtypes.QueryRangeResponse,
pageGroups []map[string]string,
groupBy []qbtypes.GroupByKey,
metadataMap map[string]map[string]string,
phaseCounts map[string]podPhaseCounts,
) []inframonitoringtypes.DaemonSetRecord {
metricsMap := parseFullQueryResponse(resp, groupBy)
records := make([]inframonitoringtypes.DaemonSetRecord, 0, len(pageGroups))
for _, labels := range pageGroups {
compositeKey := compositeKeyFromLabels(labels, groupBy)
daemonSetName := labels[daemonSetNameAttrKey]
record := inframonitoringtypes.DaemonSetRecord{ // initialize with default values
DaemonSetName: daemonSetName,
DaemonSetCPU: -1,
DaemonSetCPURequest: -1,
DaemonSetCPULimit: -1,
DaemonSetMemory: -1,
DaemonSetMemoryRequest: -1,
DaemonSetMemoryLimit: -1,
DesiredNodes: -1,
CurrentNodes: -1,
Meta: map[string]string{},
}
if metrics, ok := metricsMap[compositeKey]; ok {
if v, exists := metrics["A"]; exists {
record.DaemonSetCPU = v
}
if v, exists := metrics["B"]; exists {
record.DaemonSetCPURequest = v
}
if v, exists := metrics["C"]; exists {
record.DaemonSetCPULimit = v
}
if v, exists := metrics["D"]; exists {
record.DaemonSetMemory = v
}
if v, exists := metrics["E"]; exists {
record.DaemonSetMemoryRequest = v
}
if v, exists := metrics["F"]; exists {
record.DaemonSetMemoryLimit = v
}
if v, exists := metrics["H"]; exists {
record.DesiredNodes = int(v)
}
if v, exists := metrics["I"]; exists {
record.CurrentNodes = int(v)
}
}
if phaseCountsForGroup, ok := phaseCounts[compositeKey]; ok {
record.PodCountsByPhase = inframonitoringtypes.PodCountsByPhase{
Pending: phaseCountsForGroup.Pending,
Running: phaseCountsForGroup.Running,
Succeeded: phaseCountsForGroup.Succeeded,
Failed: phaseCountsForGroup.Failed,
Unknown: phaseCountsForGroup.Unknown,
}
}
if attrs, ok := metadataMap[compositeKey]; ok {
for k, v := range attrs {
record.Meta[k] = v
}
}
records = append(records, record)
}
return records
}
func (m *module) getTopDaemonSetGroups(
ctx context.Context,
orgID valuer.UUID,
req *inframonitoringtypes.PostableDaemonSets,
metadataMap map[string]map[string]string,
) ([]map[string]string, error) {
orderByKey := req.OrderBy.Key.Name
queryNamesForOrderBy := orderByToDaemonSetsQueryNames[orderByKey]
rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]
topReq := &qbtypes.QueryRangeRequest{
Start: uint64(req.Start),
End: uint64(req.End),
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
},
}
for _, envelope := range m.newDaemonSetsTableListQuery().CompositeQuery.Queries {
if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
continue
}
copied := envelope
if copied.Type == qbtypes.QueryTypeBuilder {
existingExpr := ""
if f := copied.GetFilter(); f != nil {
existingExpr = f.Expression
}
reqFilterExpr := ""
if req.Filter != nil {
reqFilterExpr = req.Filter.Expression
}
merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
copied.SetFilter(&qbtypes.Filter{Expression: merged})
copied.SetGroupBy(req.GroupBy)
}
topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
}
resp, err := m.querier.QueryRange(ctx, orgID, topReq)
if err != nil {
return nil, err
}
allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
}
func (m *module) getDaemonSetsTableMetadata(ctx context.Context, req *inframonitoringtypes.PostableDaemonSets) (map[string]map[string]string, error) {
var nonGroupByAttrs []string
for _, key := range daemonSetAttrKeysForMetadata {
if !isKeyInGroupByAttrs(req.GroupBy, key) {
nonGroupByAttrs = append(nonGroupByAttrs, key)
}
}
return m.getMetadata(ctx, daemonSetsTableMetricNamesList, req.GroupBy, nonGroupByAttrs, req.Filter, req.Start, req.End)
}

View File

@@ -1,237 +0,0 @@
package implinframonitoring
import (
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
const (
daemonSetNameAttrKey = "k8s.daemonset.name"
daemonSetsBaseFilterExpr = "k8s.daemonset.name != ''"
)
var daemonSetNameGroupByKey = qbtypes.GroupByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: daemonSetNameAttrKey,
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
}
// daemonSetsTableMetricNamesList drives the existence/retention check.
// Includes k8s.pod.phase even though phase isn't part of the QB composite query —
// it is queried separately via getPerGroupPodPhaseCounts, and we want the
// response to short-circuit cleanly when the phase metric is absent.
var daemonSetsTableMetricNamesList = []string{
"k8s.pod.phase",
"k8s.pod.cpu.usage",
"k8s.pod.cpu_request_utilization",
"k8s.pod.cpu_limit_utilization",
"k8s.pod.memory.working_set",
"k8s.pod.memory_request_utilization",
"k8s.pod.memory_limit_utilization",
"k8s.daemonset.desired_scheduled_nodes",
"k8s.daemonset.current_scheduled_nodes",
}
// Carried forward from v1 daemonSetAttrsToEnrich
// (pkg/query-service/app/inframetrics/daemonsets.go:29-33).
var daemonSetAttrKeysForMetadata = []string{
"k8s.daemonset.name",
"k8s.namespace.name",
"k8s.cluster.name",
}
// orderByToDaemonSetsQueryNames maps the orderBy column to the query name
// used for ranking daemonset groups. v2 B/C/E/F are direct metrics, no
// formula deps — so unlike v1 we don't carry A/D.
var orderByToDaemonSetsQueryNames = map[string][]string{
inframonitoringtypes.DaemonSetsOrderByCPU: {"A"},
inframonitoringtypes.DaemonSetsOrderByCPURequest: {"B"},
inframonitoringtypes.DaemonSetsOrderByCPULimit: {"C"},
inframonitoringtypes.DaemonSetsOrderByMemory: {"D"},
inframonitoringtypes.DaemonSetsOrderByMemoryRequest: {"E"},
inframonitoringtypes.DaemonSetsOrderByMemoryLimit: {"F"},
inframonitoringtypes.DaemonSetsOrderByDesiredNodes: {"H"},
inframonitoringtypes.DaemonSetsOrderByCurrentNodes: {"I"},
}
// newDaemonSetsTableListQuery builds the composite QB v5 request for the daemonsets list.
// Eight builder queries: A..F roll up pod-level metrics by daemonset, H/I take the
// latest daemonset-level desired/current scheduled-node counts. Restarts (v1 query G)
// is intentionally omitted to match the v2 pods/deployments/statefulsets/jobs pattern.
//
// Every builder query carries the base filter `daemonSetsBaseFilterExpr`. Reason:
// pod-level metrics (A..F) are emitted for every pod regardless of whether the
// pod belongs to a DaemonSet; only DaemonSet-owned pods carry the
// `k8s.daemonset.name` resource attribute. Without this filter, standalone pods
// and pods owned by other workloads (Deployment/StatefulSet/Job/...) collapse into
// a single empty-string group under the default groupBy. v1's GetDaemonSetList
// applied the same filter via FilterOperatorExists.
func (m *module) newDaemonSetsTableListQuery() *qbtypes.QueryRangeRequest {
queries := []qbtypes.QueryEnvelope{
// Query A: k8s.pod.cpu.usage — sum of pod CPU within the group.
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "A",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.cpu.usage",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
Filter: &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
GroupBy: []qbtypes.GroupByKey{daemonSetNameGroupByKey},
Disabled: false,
},
},
// Query B: k8s.pod.cpu_request_utilization — avg across pods in the group.
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "B",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.cpu_request_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
Filter: &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
GroupBy: []qbtypes.GroupByKey{daemonSetNameGroupByKey},
Disabled: false,
},
},
// Query C: k8s.pod.cpu_limit_utilization — avg across pods in the group.
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "C",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.cpu_limit_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
Filter: &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
GroupBy: []qbtypes.GroupByKey{daemonSetNameGroupByKey},
Disabled: false,
},
},
// Query D: k8s.pod.memory.working_set — sum of pod memory within the group.
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "D",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.memory.working_set",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
Filter: &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
GroupBy: []qbtypes.GroupByKey{daemonSetNameGroupByKey},
Disabled: false,
},
},
// Query E: k8s.pod.memory_request_utilization — avg across pods in the group.
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "E",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.memory_request_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
Filter: &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
GroupBy: []qbtypes.GroupByKey{daemonSetNameGroupByKey},
Disabled: false,
},
},
// Query F: k8s.pod.memory_limit_utilization — avg across pods in the group.
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "F",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.memory_limit_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
Filter: &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
GroupBy: []qbtypes.GroupByKey{daemonSetNameGroupByKey},
Disabled: false,
},
},
// Query H: k8s.daemonset.desired_scheduled_nodes — latest known desired node count per group.
// v1 used TimeAggregationAnyLast (v3) → mapped to TimeAggregationLatest in v5;
// SpaceAggregationSum + ReduceToLast preserve v1's "latest, summed across the group".
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "H",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.daemonset.desired_scheduled_nodes",
TimeAggregation: metrictypes.TimeAggregationLatest,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToLast,
},
},
Filter: &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
GroupBy: []qbtypes.GroupByKey{daemonSetNameGroupByKey},
Disabled: false,
},
},
// Query I: k8s.daemonset.current_scheduled_nodes — latest known currently scheduled node count per group.
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "I",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.daemonset.current_scheduled_nodes",
TimeAggregation: metrictypes.TimeAggregationLatest,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToLast,
},
},
Filter: &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
GroupBy: []qbtypes.GroupByKey{daemonSetNameGroupByKey},
Disabled: false,
},
},
}
return &qbtypes.QueryRangeRequest{
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: queries,
},
}
}

View File

@@ -237,27 +237,3 @@ func (h *handler) ListJobs(rw http.ResponseWriter, req *http.Request) {
render.Success(rw, http.StatusOK, result)
}
func (h *handler) ListDaemonSets(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
render.Error(rw, err)
return
}
orgID := valuer.MustNewUUID(claims.OrgID)
var parsedReq inframonitoringtypes.PostableDaemonSets
if err := binding.JSON.BindBody(req.Body, &parsedReq); err != nil {
render.Error(rw, err)
return
}
result, err := h.module.ListDaemonSets(req.Context(), orgID, &parsedReq)
if err != nil {
render.Error(rw, err)
return
}
render.Success(rw, http.StatusOK, result)
}

View File

@@ -900,100 +900,3 @@ func (m *module) ListJobs(ctx context.Context, orgID valuer.UUID, req *inframoni
return resp, nil
}
func (m *module) ListDaemonSets(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableDaemonSets) (*inframonitoringtypes.DaemonSets, error) {
if err := req.Validate(); err != nil {
return nil, err
}
resp := &inframonitoringtypes.DaemonSets{}
if req.OrderBy == nil {
req.OrderBy = &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: inframonitoringtypes.DaemonSetsOrderByCPU,
},
},
Direction: qbtypes.OrderDirectionDesc,
}
}
if len(req.GroupBy) == 0 {
req.GroupBy = []qbtypes.GroupByKey{daemonSetNameGroupByKey}
resp.Type = inframonitoringtypes.ResponseTypeList
} else {
resp.Type = inframonitoringtypes.ResponseTypeGroupedList
}
// Bake the workload base filter into req.Filter so all downstream helpers pick it up.
if req.Filter == nil {
req.Filter = &qbtypes.Filter{}
}
req.Filter.Expression = mergeFilterExpressions(daemonSetsBaseFilterExpr, req.Filter.Expression)
missingMetrics, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, daemonSetsTableMetricNamesList)
if err != nil {
return nil, err
}
if len(missingMetrics) > 0 {
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: missingMetrics}
resp.Records = []inframonitoringtypes.DaemonSetRecord{}
resp.Total = 0
return resp, nil
}
if req.End < int64(minFirstReportedUnixMilli) {
resp.EndTimeBeforeRetention = true
resp.Records = []inframonitoringtypes.DaemonSetRecord{}
resp.Total = 0
return resp, nil
}
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: []string{}}
metadataMap, err := m.getDaemonSetsTableMetadata(ctx, req)
if err != nil {
return nil, err
}
resp.Total = len(metadataMap)
pageGroups, err := m.getTopDaemonSetGroups(ctx, orgID, req, metadataMap)
if err != nil {
return nil, err
}
if len(pageGroups) == 0 {
resp.Records = []inframonitoringtypes.DaemonSetRecord{}
return resp, nil
}
filterExpr := ""
if req.Filter != nil {
filterExpr = req.Filter.Expression
}
fullQueryReq := buildFullQueryRequest(req.Start, req.End, filterExpr, req.GroupBy, pageGroups, m.newDaemonSetsTableListQuery())
queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
if err != nil {
return nil, err
}
// Reuse the pods phase-counts CTE function via a temp struct — it reads only
// Start/End/Filter/GroupBy from PostablePods. Pods owned by a DaemonSet carry
// k8s.daemonset.name as a resource attribute, so default-groupBy gives
// per-daemonset phase counts automatically.
phaseCounts, err := m.getPerGroupPodPhaseCounts(ctx, &inframonitoringtypes.PostablePods{
Start: req.Start,
End: req.End,
Filter: req.Filter,
GroupBy: req.GroupBy,
}, pageGroups)
if err != nil {
return nil, err
}
resp.Records = buildDaemonSetRecords(queryResp, pageGroups, req.GroupBy, metadataMap, phaseCounts)
resp.Warning = queryResp.Warning
return resp, nil
}

View File

@@ -18,7 +18,6 @@ type Handler interface {
ListDeployments(http.ResponseWriter, *http.Request)
ListStatefulSets(http.ResponseWriter, *http.Request)
ListJobs(http.ResponseWriter, *http.Request)
ListDaemonSets(http.ResponseWriter, *http.Request)
}
type Module interface {
@@ -31,5 +30,4 @@ type Module interface {
ListDeployments(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableDeployments) (*inframonitoringtypes.Deployments, error)
ListStatefulSets(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableStatefulSets) (*inframonitoringtypes.StatefulSets, error)
ListJobs(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableJobs) (*inframonitoringtypes.Jobs, error)
ListDaemonSets(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableDaemonSets) (*inframonitoringtypes.DaemonSets, error)
}

View File

@@ -41,7 +41,7 @@ func (c *conditionBuilder) conditionFor(
// TODO(Piyush): Update this to support multiple JSON columns based on evolutions
for _, column := range columns {
// TODO(Tushar): thread orgID here to evaluate correctly
if column.Type.GetType() == schema.ColumnTypeEnumJSON && key.FieldContext == telemetrytypes.FieldContextBody && c.fl.BooleanOrEmpty(ctx, flagger.FeatureUseJSONBody, featuretypes.NewFlaggerEvaluationContext(valuer.UUID{})) && key.Name != messageSubField {
if column.Type.GetType() == schema.ColumnTypeEnumJSON && c.fl.BooleanOrEmpty(ctx, flagger.FeatureUseJSONBody, featuretypes.NewFlaggerEvaluationContext(valuer.UUID{})) && key.Name != messageSubField {
valueType, value := InferDataType(value, operator, key)
cond, err := NewJSONConditionBuilder(key, valueType).buildJSONCondition(operator, value, sb)
if err != nil {

View File

@@ -33,7 +33,7 @@ func (t TestExpected) GetQuery() string {
}
func TestJSONStmtBuilder_TimeSeries(t *testing.T) {
statementBuilder, _ := buildJSONTestStatementBuilder(t, false)
statementBuilder := buildJSONTestStatementBuilder(t, false)
cases := []struct {
name string
@@ -171,7 +171,7 @@ func TestStmtBuilderTimeSeriesBodyGroupByPromoted(t *testing.T) {
*/
func TestJSONStmtBuilder_PrimitivePaths(t *testing.T) {
statementBuilder, _ := buildJSONTestStatementBuilder(t, false)
statementBuilder := buildJSONTestStatementBuilder(t, false)
cases := []struct {
name string
filter string
@@ -494,7 +494,7 @@ func TestStatementBuilderListQueryBodyPromoted(t *testing.T) {
*/
func TestJSONStmtBuilder_ArrayPaths(t *testing.T) {
statementBuilder, _ := buildJSONTestStatementBuilder(t, false)
statementBuilder := buildJSONTestStatementBuilder(t, false)
cases := []struct {
name string
filter string
@@ -799,7 +799,7 @@ func TestJSONStmtBuilder_ArrayPaths(t *testing.T) {
}
func TestJSONStmtBuilder_IndexedPaths(t *testing.T) {
statementBuilder, _ := buildJSONTestStatementBuilder(t, true)
statementBuilder := buildJSONTestStatementBuilder(t, true)
cases := []struct {
name string
query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]
@@ -918,7 +918,7 @@ func TestJSONStmtBuilder_IndexedPaths(t *testing.T) {
}
func TestJSONStmtBuilder_SelectField(t *testing.T) {
statementBuilder, _ := buildJSONTestStatementBuilder(t, false)
statementBuilder := buildJSONTestStatementBuilder(t, false)
cases := []struct {
name string
@@ -1006,7 +1006,7 @@ func TestJSONStmtBuilder_SelectField(t *testing.T) {
}
func TestJSONStmtBuilder_OrderBy(t *testing.T) {
statementBuilder, _ := buildJSONTestStatementBuilder(t, false)
statementBuilder := buildJSONTestStatementBuilder(t, false)
cases := []struct {
name string
@@ -1082,69 +1082,6 @@ func TestJSONStmtBuilder_OrderBy(t *testing.T) {
}
}
func TestResourceAggrAndGroupBy_WithJSONEnabled(t *testing.T) {
statementBuilder, metadataStore := buildJSONTestStatementBuilder(t, false)
releaseTime := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
keysMap := buildCompleteFieldKeyMap(releaseTime)
for _, keys := range keysMap {
for _, key := range keys {
metadataStore.SetKey(key)
}
}
cases := []struct {
name string
requestType qbtypes.RequestType
query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]
expected qbtypes.Statement
expectedErrContains string
}{
{
name: "resource_aggregation_and_group_by_with_json_enabled",
requestType: qbtypes.RequestTypeTimeSeries,
query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
Signal: telemetrytypes.SignalLogs,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
GroupBy: []qbtypes.GroupByKey{
{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "region",
},
},
},
Filter: &qbtypes.Filter{
Expression: "user.name exists",
},
Aggregations: []qbtypes.LogAggregation{
{
Expression: "count_distinct(service.name)",
},
},
},
expected: qbtypes.Statement{
Query: "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(resource.`region`::String IS NOT NULL, resource.`region`::String, NULL)) AS `region`, countDistinct(multiIf(resource.`service.name`::String IS NOT NULL, resource.`service.name`::String, NULL)) AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE ((dynamicElement(body_v2.`user.name`, 'String') IS NOT NULL) OR mapContains(attributes_string, 'user.name') = ?) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY ts, `region`",
Args: []any{true, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448)},
Warnings: []string{"Key `user.name` is ambiguous, found 2 different combinations of field context / data type: [name=user.name,context=body,datatype=string name=user.name,context=attribute,datatype=string]."},
},
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil)
if c.expectedErrContains != "" {
require.Error(t, err)
require.Contains(t, err.Error(), c.expectedErrContains)
} else {
require.NoError(t, err)
require.Equal(t, c.expected.Query, q.Query)
require.Equal(t, c.expected.Args, q.Args)
require.Equal(t, c.expected.Warnings, q.Warnings)
}
})
}
}
func buildTestTelemetryMetadataStore(t *testing.T, addIndexes bool) *telemetrytypestest.MockMetadataStore {
mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
mockMetadataStore.SetStaticFields(IntrinsicFields)
@@ -1186,7 +1123,7 @@ func buildTestTelemetryMetadataStore(t *testing.T, addIndexes bool) *telemetryty
return mockMetadataStore
}
func buildJSONTestStatementBuilder(t *testing.T, addIndexes bool) (*logQueryStatementBuilder, *telemetrytypestest.MockMetadataStore) {
func buildJSONTestStatementBuilder(t *testing.T, addIndexes bool) *logQueryStatementBuilder {
t.Helper()
mockMetadataStore := buildTestTelemetryMetadataStore(t, addIndexes)
@@ -1207,5 +1144,5 @@ func buildJSONTestStatementBuilder(t *testing.T, addIndexes bool) (*logQueryStat
fl,
)
return statementBuilder, mockMetadataStore
return statementBuilder
}

View File

@@ -1,105 +0,0 @@
package inframonitoringtypes
import (
"encoding/json"
"slices"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)
type DaemonSets struct {
Type ResponseType `json:"type" required:"true"`
Records []DaemonSetRecord `json:"records" required:"true"`
Total int `json:"total" required:"true"`
RequiredMetricsCheck RequiredMetricsCheck `json:"requiredMetricsCheck" required:"true"`
EndTimeBeforeRetention bool `json:"endTimeBeforeRetention" required:"true"`
Warning *qbtypes.QueryWarnData `json:"warning,omitempty"`
}
type DaemonSetRecord struct {
DaemonSetName string `json:"daemonSetName" required:"true"`
DaemonSetCPU float64 `json:"daemonSetCPU" required:"true"`
DaemonSetCPURequest float64 `json:"daemonSetCPURequest" required:"true"`
DaemonSetCPULimit float64 `json:"daemonSetCPULimit" required:"true"`
DaemonSetMemory float64 `json:"daemonSetMemory" required:"true"`
DaemonSetMemoryRequest float64 `json:"daemonSetMemoryRequest" required:"true"`
DaemonSetMemoryLimit float64 `json:"daemonSetMemoryLimit" required:"true"`
DesiredNodes int `json:"desiredNodes" required:"true"`
CurrentNodes int `json:"currentNodes" required:"true"`
PodCountsByPhase PodCountsByPhase `json:"podCountsByPhase" required:"true"`
Meta map[string]string `json:"meta" required:"true"`
}
// PostableDaemonSets is the request body for the v2 daemonsets list API.
type PostableDaemonSets struct {
Start int64 `json:"start" required:"true"`
End int64 `json:"end" required:"true"`
Filter *qbtypes.Filter `json:"filter"`
GroupBy []qbtypes.GroupByKey `json:"groupBy"`
OrderBy *qbtypes.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit" required:"true"`
}
// Validate ensures PostableDaemonSets contains acceptable values.
func (req *PostableDaemonSets) Validate() error {
if req == nil {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
}
if req.Start <= 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid start time %d: start must be greater than 0",
req.Start,
)
}
if req.End <= 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid end time %d: end must be greater than 0",
req.End,
)
}
if req.Start >= req.End {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid time range: start (%d) must be less than end (%d)",
req.Start,
req.End,
)
}
if req.Limit < 1 || req.Limit > 5000 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be between 1 and 5000")
}
if req.Offset < 0 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "offset cannot be negative")
}
if req.OrderBy != nil {
if !slices.Contains(DaemonSetsValidOrderByKeys, req.OrderBy.Key.Name) {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by key: %s", req.OrderBy.Key.Name)
}
if req.OrderBy.Direction != qbtypes.OrderDirectionAsc && req.OrderBy.Direction != qbtypes.OrderDirectionDesc {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by direction: %s", req.OrderBy.Direction)
}
}
return nil
}
// UnmarshalJSON validates input immediately after decoding.
func (req *PostableDaemonSets) UnmarshalJSON(data []byte) error {
type raw PostableDaemonSets
var decoded raw
if err := json.Unmarshal(data, &decoded); err != nil {
return err
}
*req = PostableDaemonSets(decoded)
return req.Validate()
}

View File

@@ -1,23 +0,0 @@
package inframonitoringtypes
const (
DaemonSetsOrderByCPU = "cpu"
DaemonSetsOrderByCPURequest = "cpu_request"
DaemonSetsOrderByCPULimit = "cpu_limit"
DaemonSetsOrderByMemory = "memory"
DaemonSetsOrderByMemoryRequest = "memory_request"
DaemonSetsOrderByMemoryLimit = "memory_limit"
DaemonSetsOrderByDesiredNodes = "desired_nodes"
DaemonSetsOrderByCurrentNodes = "current_nodes"
)
var DaemonSetsValidOrderByKeys = []string{
DaemonSetsOrderByCPU,
DaemonSetsOrderByCPURequest,
DaemonSetsOrderByCPULimit,
DaemonSetsOrderByMemory,
DaemonSetsOrderByMemoryRequest,
DaemonSetsOrderByMemoryLimit,
DaemonSetsOrderByDesiredNodes,
DaemonSetsOrderByCurrentNodes,
}
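The removed Validate method above enforces start > 0, end > 0, start < end, limit between 1 and 5000, a non-negative offset, and the orderBy key whitelist defined just above. A hedged sketch of the same constraints expressed as a client-side pre-check; the function, its error strings, and the request interface are hypothetical, only the constraint values come from the removed Go code.

// Hypothetical client-side mirror of PostableDaemonSets.Validate. Constraint
// values come from the removed Go code above; everything else is an assumption.
const VALID_DAEMONSET_ORDER_BY_KEYS: readonly string[] = [
  'cpu', 'cpu_request', 'cpu_limit',
  'memory', 'memory_request', 'memory_limit',
  'desired_nodes', 'current_nodes',
];

interface DaemonSetsListRequest {
  start: number;
  end: number;
  limit: number;
  offset?: number;
  orderBy?: { key: { name: string }; direction: 'asc' | 'desc' };
}

function precheckDaemonSetsRequest(req: DaemonSetsListRequest): string | null {
  if (req.start <= 0 || req.end <= 0) return 'start and end must be greater than 0';
  if (req.start >= req.end) return 'start must be less than end';
  if (req.limit < 1 || req.limit > 5000) return 'limit must be between 1 and 5000';
  if ((req.offset ?? 0) < 0) return 'offset cannot be negative';
  if (req.orderBy && !VALID_DAEMONSET_ORDER_BY_KEYS.includes(req.orderBy.key.name)) {
    return `invalid order by key: ${req.orderBy.key.name}`;
  }
  return null; // null means the request passes the same checks the server applied
}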

View File

@@ -1,273 +0,0 @@
package inframonitoringtypes
import (
"testing"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/stretchr/testify/require"
)
func TestPostableDaemonSets_Validate(t *testing.T) {
tests := []struct {
name string
req *PostableDaemonSets
wantErr bool
}{
{
name: "valid request",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "nil request",
req: nil,
wantErr: true,
},
{
name: "start time zero",
req: &PostableDaemonSets{
Start: 0,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time negative",
req: &PostableDaemonSets{
Start: -1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "end time zero",
req: &PostableDaemonSets{
Start: 1000,
End: 0,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time greater than end time",
req: &PostableDaemonSets{
Start: 2000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time equal to end time",
req: &PostableDaemonSets{
Start: 1000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "limit zero",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 0,
Offset: 0,
},
wantErr: true,
},
{
name: "limit negative",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: -10,
Offset: 0,
},
wantErr: true,
},
{
name: "limit exceeds max",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 5001,
Offset: 0,
},
wantErr: true,
},
{
name: "offset negative",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 100,
Offset: -5,
},
wantErr: true,
},
{
name: "orderBy nil is valid",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "orderBy with valid key cpu and direction asc",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: DaemonSetsOrderByCPU,
},
},
Direction: qbtypes.OrderDirectionAsc,
},
},
wantErr: false,
},
{
name: "orderBy with valid key memory_limit and direction desc",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: DaemonSetsOrderByMemoryLimit,
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: false,
},
{
name: "orderBy with valid key desired_nodes and direction desc",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: DaemonSetsOrderByDesiredNodes,
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: false,
},
{
name: "orderBy with valid key current_nodes and direction asc",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: DaemonSetsOrderByCurrentNodes,
},
},
Direction: qbtypes.OrderDirectionAsc,
},
},
wantErr: false,
},
{
name: "orderBy with restarts key is rejected",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "restarts",
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: true,
},
{
name: "orderBy with invalid key",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "unknown",
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: true,
},
{
name: "orderBy with valid key but invalid direction",
req: &PostableDaemonSets{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: DaemonSetsOrderByCPU,
},
},
Direction: qbtypes.OrderDirection{String: valuer.NewString("invalid")},
},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.req.Validate()
if tt.wantErr {
require.Error(t, err)
require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
} else {
require.NoError(t, err)
}
})
}
}

View File

@@ -450,35 +450,6 @@ def build_scalar_query(
return {"type": "builder_query", "spec": spec}
def build_raw_query(
name: str,
signal: str,
*,
order: list[dict] | None = None,
limit: int | None = None,
filter_expression: str | None = None,
step_interval: int = DEFAULT_STEP_INTERVAL,
disabled: bool = False,
) -> dict:
spec: dict[str, Any] = {
"name": name,
"signal": signal,
"stepInterval": step_interval,
"disabled": disabled,
}
if order:
spec["order"] = order
if limit is not None:
spec["limit"] = limit
if filter_expression:
spec["filter"] = {"expression": filter_expression}
return {"type": "builder_query", "spec": spec}
def build_group_by_field(
name: str,
field_data_type: str = "string",

View File

@@ -11,7 +11,6 @@ from fixtures.logs import Logs
from fixtures.querier import (
build_logs_aggregation,
build_order_by,
build_raw_query,
build_scalar_query,
get_column_data_from_response,
get_rows,
@@ -28,33 +27,28 @@ def _run_query_case(signoz: types.SigNoz, token: str, now: datetime, case: dict[
start_ms = case.get("startMs", int((now - timedelta(seconds=10)).timestamp() * 1000))
end_ms = case.get("endMs", int(now.timestamp() * 1000))
if case["requestType"] == "raw":
query = build_raw_query(
name=case["name"],
signal="logs",
filter_expression=case.get("expression"),
order=case.get("order") or [build_order_by("timestamp", "desc")],
limit=case.get("limit", 100),
step_interval=case.get("stepInterval") or 60,
)
aggregation = case.get("aggregation")
if aggregation and not isinstance(aggregation, list):
aggregations = [build_logs_aggregation(aggregation)]
elif aggregation:
aggregations = aggregation
else:
aggregation = case.get("aggregation")
if aggregation and not isinstance(aggregation, list):
aggregations = [build_logs_aggregation(aggregation)]
elif aggregation:
aggregations = aggregation
else:
aggregations = []
query = build_scalar_query(
name=case["name"],
signal="logs",
aggregations=aggregations,
group_by=case.get("groupBy"),
order=case.get("order"),
limit=case.get("limit", 100),
filter_expression=case.get("expression"),
step_interval=case.get("stepInterval") or 60,
)
aggregations = []
order = case.get("order")
if order is None and case["requestType"] == "raw":
order = [build_order_by("timestamp", "desc")]
query = build_scalar_query(
name=case["name"],
signal="logs",
aggregations=aggregations,
group_by=case.get("groupBy"),
order=order,
limit=case.get("limit", 100),
filter_expression=case.get("expression"),
step_interval=case.get("stepInterval") or 60,
)
response = make_query_request(
signoz=signoz,
@@ -642,9 +636,10 @@ def test_select_order_by(
end_ms = int(now.timestamp() * 1000)
def _run(case: dict[str, Any]) -> None:
query = build_raw_query(
query = build_scalar_query(
name=case["name"],
signal="logs",
aggregations=[build_logs_aggregation("count()")],
order=case["order"],
limit=100,
step_interval=60,

View File

@@ -1,285 +0,0 @@
import json
from collections.abc import Callable
from datetime import UTC, datetime, timedelta
from typing import Any
import requests
from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.logs import Logs
from fixtures.querier import (
build_group_by_field,
build_logs_aggregation,
build_order_by,
build_raw_query,
build_scalar_query,
get_rows,
get_scalar_table_data,
make_query_request,
)
def _raw(
signoz: types.SigNoz,
token: str,
start_ms: int,
end_ms: int,
name: str,
*,
expression: str | None = None,
order: list[dict] | None = None,
limit: int = 100,
) -> requests.Response:
q = build_raw_query(
name=name,
signal="logs",
filter_expression=expression,
order=order or [build_order_by("timestamp", "desc")],
limit=limit,
step_interval=60,
)
r = make_query_request(signoz, token, start_ms, end_ms, queries=[q], request_type="raw")
assert r.status_code == 200, f"HTTP {r.status_code} for '{name}': {r.text}"
return r
def _scalar(
signoz: types.SigNoz,
token: str,
start_ms: int,
end_ms: int,
name: str,
aggregation: str,
*,
expression: str | None = None,
group_by: list[dict] | None = None,
) -> requests.Response:
q = build_scalar_query(
name=name,
signal="logs",
aggregations=[build_logs_aggregation(aggregation)],
filter_expression=expression,
group_by=group_by,
step_interval=60,
)
r = make_query_request(signoz, token, start_ms, end_ms, queries=[q], request_type="scalar")
assert r.status_code == 200, f"HTTP {r.status_code} for '{name}': {r.text}"
return r
def _body_users(response: requests.Response) -> set[str | None]:
return {json.loads(row["data"]["body"]).get("user") for row in get_rows(response)}
def _body_scores(response: requests.Response) -> list[int | None]:
return [json.loads(row["data"]["body"]).get("score") for row in get_rows(response)]
def _services(response: requests.Response) -> list[str]:
return [row["data"]["resources_string"].get("service.name", "") for row in get_rows(response)]
def _counts(response: requests.Response) -> dict[str, Any]:
return {str(row[0]): row[-1] for row in get_scalar_table_data(response.json()) if row}
def _run_case(
signoz: types.SigNoz,
token: str,
start_ms: int,
end_ms: int,
case: dict[str, Any],
) -> None:
if case["requestType"] == "raw":
response = _raw(signoz, token, start_ms, end_ms, case["name"], expression=case.get("expression"), order=case.get("order"))
else:
response = _scalar(signoz, token, start_ms, end_ms, case["name"], case["aggregation"], expression=case.get("expression"), group_by=case.get("groupBy"))
assert case["validate"](response), f"Validation failed for '{case['name']}': {response.json()}"
# ============================================================================
# Filter · GroupBy · Aggregation — non-body fields across all three contexts
#
# Five cases, one dataset. Each case crosses a different combination of
# resource attr / log attr / top-level field in WHERE, GROUP BY, and agg:
#
# case 1 filter resource + log attr + top-level in WHERE (raw)
# case 2 group by resource × top-level multi-key (scalar)
# case 3 aggregation count_distinct(log attr) grouped by top-level (scalar)
# case 4 agg+filter count by resource, body-field WHERE guard (scalar)
# case 5 agg+filter count_distinct(resource) by log attr, top-level filter (scalar)
#
# Data landscape (5 logs):
# log1 — auth-svc, GET, INFO, score=80, user=alice
# log2 — auth-svc, POST, ERROR, score=90, user=bob
# log3 — auth-svc, GET, INFO, score=60, user=carol
# log4 — api-gw, GET, WARN, score=70, user=diana
# log5 — worker, DELETE, ERROR, score=100, user=eve
# ============================================================================
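# The per-case expectations in the test just below can be re-derived offline from
# the five-log landscape described above; this sketch does that with a plain
# Counter (the tuples restate the landscape for illustration, they are not a fixture format).
def _expected_counts_sketch() -> None:
    from collections import Counter

    landscape = [
        # (service, method, severity, score)
        ("auth-svc", "GET", "INFO", 80),
        ("auth-svc", "POST", "ERROR", 90),
        ("auth-svc", "GET", "INFO", 60),
        ("api-gw", "GET", "WARN", 70),
        ("worker", "DELETE", "ERROR", 100),
    ]
    # case 2: count() by service x severity
    assert Counter((svc, sev) for svc, _, sev, _ in landscape) == Counter(
        {("auth-svc", "INFO"): 2, ("auth-svc", "ERROR"): 1, ("api-gw", "WARN"): 1, ("worker", "ERROR"): 1}
    )
    # case 3: distinct http.method per severity (only ERROR spans two methods: POST and DELETE)
    methods = {sev: {m for _, m, s, _ in landscape if s == sev} for sev in {"INFO", "ERROR", "WARN"}}
    assert {sev: len(ms) for sev, ms in methods.items()} == {"INFO": 1, "ERROR": 2, "WARN": 1}
    # case 4: count() by service where score >= 80 (api-gw drops out)
    assert Counter(svc for svc, _, _, sc in landscape if sc >= 80) == Counter({"auth-svc": 2, "worker": 1})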
def test_non_body_filter_groupby_aggregation(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_logs: Callable[[list[Logs]], None],
export_json_types: Callable[[list[Logs]], None],
) -> None:
now = datetime.now(tz=UTC)
start_ms = int((now - timedelta(seconds=10)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
log_data = [
("auth-svc", "GET", "INFO", {"score": 80, "user": "alice"}),
("auth-svc", "POST", "ERROR", {"score": 90, "user": "bob"}),
("auth-svc", "GET", "INFO", {"score": 60, "user": "carol"}),
("api-gw", "GET", "WARN", {"score": 70, "user": "diana"}),
("worker", "DELETE", "ERROR", {"score": 100, "user": "eve"}),
]
logs_list = [
Logs(
timestamp=now - timedelta(seconds=len(log_data) - i),
resources={"service.name": svc},
attributes={"http.method": method},
body_v2=json.dumps(body),
body_promoted="",
severity_text=sev,
)
for i, (svc, method, sev, body) in enumerate(log_data)
]
export_json_types(logs_list)
insert_logs(logs_list)
token = get_token(email=USER_ADMIN_EMAIL, password=USER_ADMIN_PASSWORD)
cases = [
# 1. Filter — resource + log attr + top-level in WHERE (all three non-body contexts at once)
{
"name": "filter.cross_context",
"requestType": "raw",
"expression": 'service.name = "auth-svc" AND http.method = "GET" AND severity_text = "INFO"',
"validate": lambda r: len(get_rows(r)) == 2 and _body_users(r) == {"alice", "carol"},
},
# 2. GroupBy — resource × top-level multi-key, no filter
# Proves both contexts resolve correctly as simultaneous GROUP BY keys.
{
"name": "groupby.resource_x_toplevel",
"requestType": "scalar",
"expression": None,
"groupBy": [build_group_by_field("service.name"), {"name": "severity_text"}],
"aggregation": "count()",
# auth-svc+INFO=2, auth-svc+ERROR=1, api-gw+WARN=1, worker+ERROR=1
"validate": lambda r: (p := {(str(row[0]), str(row[1])): row[-1] for row in get_scalar_table_data(r.json()) if len(row) >= 3}) and p.get(("auth-svc", "INFO")) == 2 and p.get(("auth-svc", "ERROR")) == 1 and p.get(("api-gw", "WARN")) == 1 and p.get(("worker", "ERROR")) == 1,
},
# 3. Aggregation — count_distinct(log attr) grouped by top-level
# ERROR logs use {POST, DELETE} → 2 distinct methods; INFO/WARN use only GET → 1.
{
"name": "agg.count_distinct_attr_by_toplevel",
"requestType": "scalar",
"expression": None,
"groupBy": [{"name": "severity_text"}],
"aggregation": "count_distinct(http.method)",
"validate": lambda r: (rows := _counts(r)) and int(rows["INFO"]) == 1 and int(rows["ERROR"]) == 2 and int(rows["WARN"]) == 1,
},
# 4. Aggregation + body filter — count by resource WHERE body score >= 80
# Body field gates the logs; non-body field drives the GROUP BY.
{
"name": "agg.count_by_resource_body_filter",
"requestType": "scalar",
"expression": "score >= 80",
"groupBy": [build_group_by_field("service.name")],
"aggregation": "count()",
# score>=80: alice(80), bob(90), eve(100) → auth-svc: 2, worker: 1; api-gw excluded
"validate": lambda r: (rows := _counts(r)) and int(rows["auth-svc"]) == 2 and int(rows["worker"]) == 1 and "api-gw" not in rows,
},
# 5. Aggregation + top-level filter — count_distinct(resource) grouped by log attr
# Aggregates a resource attr, groups by a log attr, filtered by a top-level field.
{
"name": "agg.count_distinct_resource_by_attr_toplevel_filter",
"requestType": "scalar",
"expression": "severity_text IN ['INFO', 'WARN']",
"groupBy": [{"name": "http.method"}],
"aggregation": "count_distinct(service.name)",
# INFO/WARN logs: GET(auth-svc×2, api-gw) → 2 distinct svcs; POST/DELETE excluded
"validate": lambda r: (rows := _counts(r)) and int(rows["GET"]) == 2 and "POST" not in rows and "DELETE" not in rows,
},
]
for case in cases:
case.setdefault("groupBy", None)
_run_case(signoz, token, start_ms, end_ms, case)
# ============================================================================
# OrderBy — non-body fields as primary sort keys
#
# Four cases cover every non-body context as the primary ORDER BY key:
# orderby.service_asc resource attr (service.name ASC)
# orderby.timestamp_desc top-level (timestamp DESC)
# orderby.severity_asc top-level (severity_text ASC)
# orderby.multi_method_then_score log attr primary, body path secondary
#
# Data landscape:
# log1 — svc-a, GET, INFO, score=80, ts=now-4s
# log2 — svc-a, POST, INFO, score=90, ts=now-3s
# log3 — svc-b, GET, WARN, score=60, ts=now-2s
# log4 — svc-b, DELETE, WARN, score=70, ts=now-1s
# ============================================================================
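# The expected sequences hard-coded in the cases below can be re-derived offline
# from the four-row landscape above with plain sorted(); this sketch does exactly
# that (tuples and field positions are illustrative, not a fixture format).
def _orderby_expectations_sketch() -> None:
    rows = [
        # (service, method, severity, score, age_seconds)
        ("svc-a", "GET", "INFO", 80, 4),
        ("svc-a", "POST", "INFO", 90, 3),
        ("svc-b", "GET", "WARN", 60, 2),
        ("svc-b", "DELETE", "WARN", 70, 1),
    ]
    # timestamp DESC == newest (smallest age) first -> scores 70, 60, 90, 80
    assert [r[3] for r in sorted(rows, key=lambda r: r[4])] == [70, 60, 90, 80]
    # service.name ASC keeps the svc-a rows ahead of the svc-b rows
    assert [r[0] for r in sorted(rows, key=lambda r: r[0])] == ["svc-a", "svc-a", "svc-b", "svc-b"]
    # http.method ASC then score ASC -> DELETE(70), GET(60), GET(80), POST(90)
    assert [r[3] for r in sorted(rows, key=lambda r: (r[1], r[3]))] == [70, 60, 80, 90]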
def test_non_body_orderby(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_logs: Callable[[list[Logs]], None],
export_json_types: Callable[[list[Logs]], None],
) -> None:
now = datetime.now(tz=UTC)
start_ms = int((now - timedelta(seconds=10)).timestamp() * 1000)
end_ms = int(now.timestamp() * 1000)
logs_list = [
Logs(timestamp=now - timedelta(seconds=4), resources={"service.name": "svc-a"}, attributes={"http.method": "GET"}, body_v2=json.dumps({"score": 80}), body_promoted="", severity_text="INFO"),
Logs(timestamp=now - timedelta(seconds=3), resources={"service.name": "svc-a"}, attributes={"http.method": "POST"}, body_v2=json.dumps({"score": 90}), body_promoted="", severity_text="INFO"),
Logs(timestamp=now - timedelta(seconds=2), resources={"service.name": "svc-b"}, attributes={"http.method": "GET"}, body_v2=json.dumps({"score": 60}), body_promoted="", severity_text="WARN"),
Logs(timestamp=now - timedelta(seconds=1), resources={"service.name": "svc-b"}, attributes={"http.method": "DELETE"}, body_v2=json.dumps({"score": 70}), body_promoted="", severity_text="WARN"),
]
export_json_types(logs_list)
insert_logs(logs_list)
token = get_token(email=USER_ADMIN_EMAIL, password=USER_ADMIN_PASSWORD)
cases = [
# resource attr ASC: svc-a×2 before svc-b×2
{
"name": "orderby.service_asc",
"requestType": "raw",
"order": [build_order_by("service.name", "asc")],
"validate": lambda r: len(get_rows(r)) == 4 and _services(r)[:2] == ["svc-a", "svc-a"] and _services(r)[2:] == ["svc-b", "svc-b"],
},
# top-level timestamp DESC: ts-1s(svc-b/70), ts-2s(svc-b/60), ts-3s(svc-a/90), ts-4s(svc-a/80)
{
"name": "orderby.timestamp_desc",
"requestType": "raw",
"order": [build_order_by("timestamp", "desc")],
"validate": lambda r: len(get_rows(r)) == 4 and _body_scores(r) == [70, 60, 90, 80] and _services(r) == ["svc-b", "svc-b", "svc-a", "svc-a"],
},
# top-level severity_text ASC: INFO(svc-a×2) before WARN(svc-b×2)
{
"name": "orderby.severity_asc",
"requestType": "raw",
"order": [build_order_by("severity_text", "asc")],
"validate": lambda r: len(get_rows(r)) == 4 and _services(r)[:2] == ["svc-a", "svc-a"] and _services(r)[2:] == ["svc-b", "svc-b"],
},
# multi-key: http.method ASC then score ASC — DELETE(70), GET(60,80), POST(90)
{
"name": "orderby.multi_method_then_score",
"requestType": "raw",
"order": [build_order_by("http.method", "asc"), build_order_by("score", "asc")],
# DELETE < GET < POST alphabetically; within GET scores go 60→80
"validate": lambda r: len(get_rows(r)) == 4 and _body_scores(r) == [70, 60, 80, 90],
},
]
for case in cases:
case.setdefault("groupBy", None)
_run_case(signoz, token, start_ms, end_ms, case)