Compare commits


1 Commit

Author: Jatinderjit Singh
SHA1: bd3d75f92a
Message: fix: maintenance ignores recurrence when fixed times also set
Date: 2026-04-27 11:43:10 +05:30
39 changed files with 192 additions and 3116 deletions

View File

@@ -2474,164 +2474,6 @@ components:
- requiredMetricsCheck
- endTimeBeforeRetention
type: object
InframonitoringtypesNodeCondition:
enum:
- ready
- not_ready
- ""
type: string
InframonitoringtypesNodeRecord:
properties:
condition:
$ref: '#/components/schemas/InframonitoringtypesNodeCondition'
meta:
additionalProperties: {}
nullable: true
type: object
nodeCPU:
format: double
type: number
nodeCPUAllocatable:
format: double
type: number
nodeMemory:
format: double
type: number
nodeMemoryAllocatable:
format: double
type: number
nodeName:
type: string
notReadyNodeCount:
type: integer
readyNodeCount:
type: integer
required:
- nodeName
- condition
- readyNodeCount
- notReadyNodeCount
- nodeCPU
- nodeCPUAllocatable
- nodeMemory
- nodeMemoryAllocatable
- meta
type: object
InframonitoringtypesNodes:
properties:
endTimeBeforeRetention:
type: boolean
records:
items:
$ref: '#/components/schemas/InframonitoringtypesNodeRecord'
nullable: true
type: array
requiredMetricsCheck:
$ref: '#/components/schemas/InframonitoringtypesRequiredMetricsCheck'
total:
type: integer
type:
$ref: '#/components/schemas/InframonitoringtypesResponseType'
warning:
$ref: '#/components/schemas/Querybuildertypesv5QueryWarnData'
required:
- type
- records
- total
- requiredMetricsCheck
- endTimeBeforeRetention
type: object
InframonitoringtypesPodPhase:
enum:
- pending
- running
- succeeded
- failed
- unknown
- ""
type: string
InframonitoringtypesPodRecord:
properties:
failedPodCount:
type: integer
meta:
additionalProperties: {}
nullable: true
type: object
pendingPodCount:
type: integer
podAge:
format: int64
type: integer
podCPU:
format: double
type: number
podCPULimit:
format: double
type: number
podCPURequest:
format: double
type: number
podMemory:
format: double
type: number
podMemoryLimit:
format: double
type: number
podMemoryRequest:
format: double
type: number
podPhase:
$ref: '#/components/schemas/InframonitoringtypesPodPhase'
podUID:
type: string
runningPodCount:
type: integer
succeededPodCount:
type: integer
unknownPodCount:
type: integer
required:
- podUID
- podCPU
- podCPURequest
- podCPULimit
- podMemory
- podMemoryRequest
- podMemoryLimit
- podPhase
- pendingPodCount
- runningPodCount
- succeededPodCount
- failedPodCount
- unknownPodCount
- podAge
- meta
type: object
InframonitoringtypesPods:
properties:
endTimeBeforeRetention:
type: boolean
records:
items:
$ref: '#/components/schemas/InframonitoringtypesPodRecord'
nullable: true
type: array
requiredMetricsCheck:
$ref: '#/components/schemas/InframonitoringtypesRequiredMetricsCheck'
total:
type: integer
type:
$ref: '#/components/schemas/InframonitoringtypesResponseType'
warning:
$ref: '#/components/schemas/Querybuildertypesv5QueryWarnData'
required:
- type
- records
- total
- requiredMetricsCheck
- endTimeBeforeRetention
type: object
InframonitoringtypesPostableHosts:
properties:
end:
@@ -2658,58 +2500,6 @@ components:
- end
- limit
type: object
InframonitoringtypesPostableNodes:
properties:
end:
format: int64
type: integer
filter:
$ref: '#/components/schemas/Querybuildertypesv5Filter'
groupBy:
items:
$ref: '#/components/schemas/Querybuildertypesv5GroupByKey'
nullable: true
type: array
limit:
type: integer
offset:
type: integer
orderBy:
$ref: '#/components/schemas/Querybuildertypesv5OrderBy'
start:
format: int64
type: integer
required:
- start
- end
- limit
type: object
InframonitoringtypesPostablePods:
properties:
end:
format: int64
type: integer
filter:
$ref: '#/components/schemas/Querybuildertypesv5Filter'
groupBy:
items:
$ref: '#/components/schemas/Querybuildertypesv5GroupByKey'
nullable: true
type: array
limit:
type: integer
offset:
type: integer
orderBy:
$ref: '#/components/schemas/Querybuildertypesv5OrderBy'
start:
format: int64
type: integer
required:
- start
- end
- limit
type: object
InframonitoringtypesRequiredMetricsCheck:
properties:
missingMetrics:
@@ -10437,9 +10227,7 @@ paths:
five metrics, and pagination via offset/limit. The response type is ''list''
for the default host.name grouping or ''grouped_list'' for custom groupBy
keys. Also reports missing required metrics and whether the requested time
range falls before the data retention boundary. Numeric metric fields (cpu,
memory, wait, load15, diskUsage) return -1 as a sentinel when no data is available
for that field; frontends should render ''—'' rather than the literal value.'
range falls before the data retention boundary.'
operationId: ListHosts
requestBody:
content:
@@ -10493,151 +10281,6 @@ paths:
summary: List Hosts for Infra Monitoring
tags:
- inframonitoring
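
The retained hosts description and the removed nodes/pods descriptions share a sentinel convention: numeric metric fields return -1 when no data is available for that field, and frontends should render '—' rather than the literal value. A minimal sketch of such a display formatter, with the function name and precision chosen for illustration only:

const NO_DATA_SENTINEL = -1;

// Map the -1 no-data sentinel to the dash the API docs ask for;
// format real values normally.
function formatMetric(value: number, digits = 2): string {
	return value === NO_DATA_SENTINEL ? '—' : value.toFixed(digits);
}

// formatMetric(-1)   -> '—'
// formatMetric(42.5) -> '42.50'
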
/api/v2/infra_monitoring/nodes:
post:
deprecated: false
description: 'Returns a paginated list of Kubernetes nodes with key metrics:
CPU usage, CPU allocatable, memory working set, memory allocatable, and per-group
readyNodeCount / notReadyNodeCount derived from each node''s latest k8s.node.condition_ready
value in the window. Each node includes metadata attributes (k8s.node.uid,
k8s.cluster.name). The response type is ''list'' for the default k8s.node.name
grouping (each row is one node with its current condition string: ready /
not_ready / '''') or ''grouped_list'' for custom groupBy keys (each row aggregates
nodes in the group with readyNodeCount and notReadyNodeCount; condition stays
empty). Supports filtering via a filter expression, custom groupBy, ordering
by cpu / cpu_allocatable / memory / memory_allocatable, and pagination via
offset/limit. Also reports missing required metrics and whether the requested
time range falls before the data retention boundary. Numeric metric fields
(nodeCPU, nodeCPUAllocatable, nodeMemory, nodeMemoryAllocatable) return -1
as a sentinel when no data is available for that field; frontends should render
''—'' rather than the literal value.'
operationId: ListNodes
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/InframonitoringtypesPostableNodes'
responses:
"200":
content:
application/json:
schema:
properties:
data:
$ref: '#/components/schemas/InframonitoringtypesNodes'
status:
type: string
required:
- status
- data
type: object
description: OK
"400":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Bad Request
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- VIEWER
- tokenizer:
- VIEWER
summary: List Nodes for Infra Monitoring
tags:
- inframonitoring
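
For reference, a minimal request body satisfying the InframonitoringtypesPostableNodes schema deleted above; only start, end, and limit are required. The millisecond unit for start/end is inferred from the retention comparison (req.End < minFirstReportedUnixMilli) in the Go module further down, and the concrete values are illustrative:

// Sketch of a POST body for the removed /api/v2/infra_monitoring/nodes.
const listNodesBody = {
	start: 1714000000000, // illustrative epoch milliseconds
	end: 1714003600000,
	limit: 10,
	offset: 0,
	// optional: filter, groupBy, orderBy per the Querybuildertypesv5 schemas
};
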
/api/v2/infra_monitoring/pods:
post:
deprecated: false
description: 'Returns a paginated list of Kubernetes pods with key metrics:
CPU usage, CPU request/limit utilization, memory working set, memory request/limit
utilization, current pod phase (pending/running/succeeded/failed/unknown),
and pod age (ms since start time). Each pod includes metadata attributes (namespace,
node, workload owner such as deployment/statefulset/daemonset/job/cronjob,
cluster). Supports filtering via a filter expression, custom groupBy to aggregate
pods by any attribute, ordering by any of the six metrics (cpu, cpu_request,
cpu_limit, memory, memory_request, memory_limit), and pagination via offset/limit.
The response type is ''list'' for the default k8s.pod.uid grouping (each row
is one pod with its current phase) or ''grouped_list'' for custom groupBy
keys (each row aggregates pods in the group with per-phase counts: pendingPodCount,
runningPodCount, succeededPodCount, failedPodCount, unknownPodCount derived
from each pod''s latest phase in the window). Also reports missing required
metrics and whether the requested time range falls before the data retention
boundary. Numeric metric fields (podCPU, podCPURequest, podCPULimit, podMemory,
podMemoryRequest, podMemoryLimit, podAge) return -1 as a sentinel when no
data is available for that field; frontends should render ''—'' rather than
the literal value.'
operationId: ListPods
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/InframonitoringtypesPostablePods'
responses:
"200":
content:
application/json:
schema:
properties:
data:
$ref: '#/components/schemas/InframonitoringtypesPods'
status:
type: string
required:
- status
- data
type: object
description: OK
"400":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Bad Request
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- VIEWER
- tokenizer:
- VIEWER
summary: List Pods for Infra Monitoring
tags:
- inframonitoring
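
And a hedged sketch of a grouped request to the removed pods endpoint: with a non-empty groupBy the response type switches from 'list' to 'grouped_list' and each row aggregates the pods in the group with per-phase counts. The groupBy key shape is assumed from the Querybuildertypesv5GroupByKey reference, which this diff does not spell out:

// Sketch only: grouped POST body for the removed /api/v2/infra_monitoring/pods.
const listPodsGroupedBody = {
	start: 1714000000000, // illustrative epoch milliseconds
	end: 1714003600000,
	limit: 10,
	groupBy: [{ name: 'k8s.namespace.name' }], // key shape assumed
};
// Each grouped row then reports pendingPodCount, runningPodCount,
// succeededPodCount, failedPodCount, and unknownPodCount.
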
/api/v2/livez:
get:
deprecated: false

View File

@@ -13,11 +13,7 @@ import type {
import type {
InframonitoringtypesPostableHostsDTO,
InframonitoringtypesPostableNodesDTO,
InframonitoringtypesPostablePodsDTO,
ListHosts200,
ListNodes200,
ListPods200,
RenderErrorResponseDTO,
} from '../sigNoz.schemas';
@@ -25,7 +21,7 @@ import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type { ErrorType, BodyType } from '../../../generatedAPIInstance';
/**
* Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (cpu, memory, wait, load15, diskUsage) return -1 as a sentinel when no data is available for that field; frontends should render '—' rather than the literal value.
* Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary.
* @summary List Hosts for Infra Monitoring
*/
export const listHosts = (
@@ -108,171 +104,3 @@ export const useListHosts = <
return useMutation(mutationOptions);
};
/**
* Returns a paginated list of Kubernetes nodes with key metrics: CPU usage, CPU allocatable, memory working set, memory allocatable, and per-group readyNodeCount / notReadyNodeCount derived from each node's latest k8s.node.condition_ready value in the window. Each node includes metadata attributes (k8s.node.uid, k8s.cluster.name). The response type is 'list' for the default k8s.node.name grouping (each row is one node with its current condition string: ready / not_ready / '') or 'grouped_list' for custom groupBy keys (each row aggregates nodes in the group with readyNodeCount and notReadyNodeCount; condition stays empty). Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_allocatable / memory / memory_allocatable, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (nodeCPU, nodeCPUAllocatable, nodeMemory, nodeMemoryAllocatable) return -1 as a sentinel when no data is available for that field; frontends should render '—' rather than the literal value.
* @summary List Nodes for Infra Monitoring
*/
export const listNodes = (
inframonitoringtypesPostableNodesDTO: BodyType<InframonitoringtypesPostableNodesDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<ListNodes200>({
url: `/api/v2/infra_monitoring/nodes`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: inframonitoringtypesPostableNodesDTO,
signal,
});
};
export const getListNodesMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listNodes>>,
TError,
{ data: BodyType<InframonitoringtypesPostableNodesDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof listNodes>>,
TError,
{ data: BodyType<InframonitoringtypesPostableNodesDTO> },
TContext
> => {
const mutationKey = ['listNodes'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof listNodes>>,
{ data: BodyType<InframonitoringtypesPostableNodesDTO> }
> = (props) => {
const { data } = props ?? {};
return listNodes(data);
};
return { mutationFn, ...mutationOptions };
};
export type ListNodesMutationResult = NonNullable<
Awaited<ReturnType<typeof listNodes>>
>;
export type ListNodesMutationBody =
BodyType<InframonitoringtypesPostableNodesDTO>;
export type ListNodesMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary List Nodes for Infra Monitoring
*/
export const useListNodes = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listNodes>>,
TError,
{ data: BodyType<InframonitoringtypesPostableNodesDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof listNodes>>,
TError,
{ data: BodyType<InframonitoringtypesPostableNodesDTO> },
TContext
> => {
const mutationOptions = getListNodesMutationOptions(options);
return useMutation(mutationOptions);
};
/**
* Returns a paginated list of Kubernetes pods with key metrics: CPU usage, CPU request/limit utilization, memory working set, memory request/limit utilization, current pod phase (pending/running/succeeded/failed/unknown), and pod age (ms since start time). Each pod includes metadata attributes (namespace, node, workload owner such as deployment/statefulset/daemonset/job/cronjob, cluster). Supports filtering via a filter expression, custom groupBy to aggregate pods by any attribute, ordering by any of the six metrics (cpu, cpu_request, cpu_limit, memory, memory_request, memory_limit), and pagination via offset/limit. The response type is 'list' for the default k8s.pod.uid grouping (each row is one pod with its current phase) or 'grouped_list' for custom groupBy keys (each row aggregates pods in the group with per-phase counts: pendingPodCount, runningPodCount, succeededPodCount, failedPodCount, unknownPodCount derived from each pod's latest phase in the window). Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (podCPU, podCPURequest, podCPULimit, podMemory, podMemoryRequest, podMemoryLimit, podAge) return -1 as a sentinel when no data is available for that field; frontends should render '—' rather than the literal value.
* @summary List Pods for Infra Monitoring
*/
export const listPods = (
inframonitoringtypesPostablePodsDTO: BodyType<InframonitoringtypesPostablePodsDTO>,
signal?: AbortSignal,
) => {
return GeneratedAPIInstance<ListPods200>({
url: `/api/v2/infra_monitoring/pods`,
method: 'POST',
headers: { 'Content-Type': 'application/json' },
data: inframonitoringtypesPostablePodsDTO,
signal,
});
};
export const getListPodsMutationOptions = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listPods>>,
TError,
{ data: BodyType<InframonitoringtypesPostablePodsDTO> },
TContext
>;
}): UseMutationOptions<
Awaited<ReturnType<typeof listPods>>,
TError,
{ data: BodyType<InframonitoringtypesPostablePodsDTO> },
TContext
> => {
const mutationKey = ['listPods'];
const { mutation: mutationOptions } = options
? options.mutation &&
'mutationKey' in options.mutation &&
options.mutation.mutationKey
? options
: { ...options, mutation: { ...options.mutation, mutationKey } }
: { mutation: { mutationKey } };
const mutationFn: MutationFunction<
Awaited<ReturnType<typeof listPods>>,
{ data: BodyType<InframonitoringtypesPostablePodsDTO> }
> = (props) => {
const { data } = props ?? {};
return listPods(data);
};
return { mutationFn, ...mutationOptions };
};
export type ListPodsMutationResult = NonNullable<
Awaited<ReturnType<typeof listPods>>
>;
export type ListPodsMutationBody =
BodyType<InframonitoringtypesPostablePodsDTO>;
export type ListPodsMutationError = ErrorType<RenderErrorResponseDTO>;
/**
* @summary List Pods for Infra Monitoring
*/
export const useListPods = <
TError = ErrorType<RenderErrorResponseDTO>,
TContext = unknown,
>(options?: {
mutation?: UseMutationOptions<
Awaited<ReturnType<typeof listPods>>,
TError,
{ data: BodyType<InframonitoringtypesPostablePodsDTO> },
TContext
>;
}): UseMutationResult<
Awaited<ReturnType<typeof listPods>>,
TError,
{ data: BodyType<InframonitoringtypesPostablePodsDTO> },
TContext
> => {
const mutationOptions = getListPodsMutationOptions(options);
return useMutation(mutationOptions);
};
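
Before this change, the generated mutation hooks were consumed like any other React Query mutation. A minimal sketch of how the now-removed useListPods hook could have been wired up; the component name, import path, and request values are illustrative:

import { useEffect } from 'react';
import { useListPods } from './inframonitoring'; // import path illustrative

function PodsLoader(): null {
	const { mutate, data } = useListPods();

	useEffect(() => {
		// Fire the POST once on mount; times are illustrative epoch milliseconds.
		mutate({ data: { start: 1714000000000, end: 1714003600000, limit: 10 } });
	}, [mutate]);

	// On success, data?.data carries the InframonitoringtypesPodsDTO payload.
	return null;
}
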

View File

@@ -3243,180 +3243,6 @@ export interface InframonitoringtypesHostsDTO {
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
export enum InframonitoringtypesNodeConditionDTO {
ready = 'ready',
not_ready = 'not_ready',
'' = '',
}
/**
* @nullable
*/
export type InframonitoringtypesNodeRecordDTOMeta = {
[key: string]: unknown;
} | null;
export interface InframonitoringtypesNodeRecordDTO {
condition: InframonitoringtypesNodeConditionDTO;
/**
* @type object
* @nullable true
*/
meta: InframonitoringtypesNodeRecordDTOMeta;
/**
* @type number
* @format double
*/
nodeCPU: number;
/**
* @type number
* @format double
*/
nodeCPUAllocatable: number;
/**
* @type number
* @format double
*/
nodeMemory: number;
/**
* @type number
* @format double
*/
nodeMemoryAllocatable: number;
/**
* @type string
*/
nodeName: string;
/**
* @type integer
*/
notReadyNodeCount: number;
/**
* @type integer
*/
readyNodeCount: number;
}
export interface InframonitoringtypesNodesDTO {
/**
* @type boolean
*/
endTimeBeforeRetention: boolean;
/**
* @type array
* @nullable true
*/
records: InframonitoringtypesNodeRecordDTO[] | null;
requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
/**
* @type integer
*/
total: number;
type: InframonitoringtypesResponseTypeDTO;
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
export enum InframonitoringtypesPodPhaseDTO {
pending = 'pending',
running = 'running',
succeeded = 'succeeded',
failed = 'failed',
unknown = 'unknown',
'' = '',
}
/**
* @nullable
*/
export type InframonitoringtypesPodRecordDTOMeta = {
[key: string]: unknown;
} | null;
export interface InframonitoringtypesPodRecordDTO {
/**
* @type integer
*/
failedPodCount: number;
/**
* @type object
* @nullable true
*/
meta: InframonitoringtypesPodRecordDTOMeta;
/**
* @type integer
*/
pendingPodCount: number;
/**
* @type integer
* @format int64
*/
podAge: number;
/**
* @type number
* @format double
*/
podCPU: number;
/**
* @type number
* @format double
*/
podCPULimit: number;
/**
* @type number
* @format double
*/
podCPURequest: number;
/**
* @type number
* @format double
*/
podMemory: number;
/**
* @type number
* @format double
*/
podMemoryLimit: number;
/**
* @type number
* @format double
*/
podMemoryRequest: number;
podPhase: InframonitoringtypesPodPhaseDTO;
/**
* @type string
*/
podUID: string;
/**
* @type integer
*/
runningPodCount: number;
/**
* @type integer
*/
succeededPodCount: number;
/**
* @type integer
*/
unknownPodCount: number;
}
export interface InframonitoringtypesPodsDTO {
/**
* @type boolean
*/
endTimeBeforeRetention: boolean;
/**
* @type array
* @nullable true
*/
records: InframonitoringtypesPodRecordDTO[] | null;
requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
/**
* @type integer
*/
total: number;
type: InframonitoringtypesResponseTypeDTO;
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
export interface InframonitoringtypesPostableHostsDTO {
/**
* @type integer
@@ -3445,62 +3271,6 @@ export interface InframonitoringtypesPostableHostsDTO {
start: number;
}
export interface InframonitoringtypesPostableNodesDTO {
/**
* @type integer
* @format int64
*/
end: number;
filter?: Querybuildertypesv5FilterDTO;
/**
* @type array
* @nullable true
*/
groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
/**
* @type integer
*/
limit: number;
/**
* @type integer
*/
offset?: number;
orderBy?: Querybuildertypesv5OrderByDTO;
/**
* @type integer
* @format int64
*/
start: number;
}
export interface InframonitoringtypesPostablePodsDTO {
/**
* @type integer
* @format int64
*/
end: number;
filter?: Querybuildertypesv5FilterDTO;
/**
* @type array
* @nullable true
*/
groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
/**
* @type integer
*/
limit: number;
/**
* @type integer
*/
offset?: number;
orderBy?: Querybuildertypesv5OrderByDTO;
/**
* @type integer
* @format int64
*/
start: number;
}
export interface InframonitoringtypesRequiredMetricsCheckDTO {
/**
* @type array
@@ -7337,22 +7107,6 @@ export type ListHosts200 = {
status: string;
};
export type ListNodes200 = {
data: InframonitoringtypesNodesDTO;
/**
* @type string
*/
status: string;
};
export type ListPods200 = {
data: InframonitoringtypesPodsDTO;
/**
* @type string
*/
status: string;
};
export type Livez200 = {
data: FactoryResponseDTO;
/**

View File

@@ -10,7 +10,6 @@ import cx from 'classnames';
import { LogType } from 'components/Logs/LogStateIndicator/LogStateIndicator';
import QuerySearch from 'components/QueryBuilderV2/QueryV2/QuerySearch/QuerySearch';
import { convertExpressionToFilters } from 'components/QueryBuilderV2/utils';
import { FeatureKeys } from 'constants/features';
import { LOCALSTORAGE } from 'constants/localStorage';
import { QueryParams } from 'constants/query';
import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
@@ -47,7 +46,6 @@ import {
TextSelect,
X,
} from 'lucide-react';
import { useAppContext } from 'providers/App/App';
import { AppState } from 'store/reducers';
import { Query, TagFilter } from 'types/api/queryBuilder/queryBuilderData';
import { DataSource, StringOperators } from 'types/common/queryBuilder';
@@ -81,10 +79,6 @@ function LogDetailInner({
const [selectedView, setSelectedView] = useState<VIEWS>(selectedTab);
const [isFilterVisible, setIsFilterVisible] = useState<boolean>(false);
const { featureFlags } = useAppContext();
const isBodyJsonQueryEnabled =
featureFlags?.find((flag) => flag.name === FeatureKeys.BODY_JSON_ENABLED)
?.active || false;
const [filters, setFilters] = useState<TagFilter | null>(null);
const [isEdit, setIsEdit] = useState<boolean>(false);
@@ -214,29 +208,11 @@ function LogDetailInner({
}
};
const logBody = useMemo(() => {
if (!isBodyJsonQueryEnabled) {
return log?.body || '';
}
try {
const json = JSON.parse(log?.body || '');
if (typeof json?.message === 'string' && json.message !== '') {
return json.message;
}
return log?.body || '';
} catch (error) {
return log?.body || '';
}
}, [isBodyJsonQueryEnabled, log?.body]);
const htmlBody = useMemo(
() => ({
__html: getSanitizedLogBody(logBody || '', { shouldEscapeHtml: true }),
__html: getSanitizedLogBody(log?.body || '', { shouldEscapeHtml: true }),
}),
[logBody],
[log?.body],
);
const handleJSONCopy = (): void => {
@@ -442,7 +418,7 @@ function LogDetailInner({
<div className="log-detail-drawer__log">
<Divider type="vertical" className={cx('log-type-indicator', logType)} />
<Tooltip
title={removeEscapeCharacters(logBody)}
title={removeEscapeCharacters(log?.body)}
placement="left"
mouseLeaveDelay={0}
>
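
The removed logBody memo implemented a small fallback chain: with the body-JSON flag on, parse the log body as JSON and prefer a non-empty string message field, otherwise fall back to the raw body. Restated as a standalone helper, with the name illustrative and the behavior taken from the deleted lines:

// Sketch of the removed display-body selection.
function extractDisplayBody(body: string, jsonEnabled: boolean): string {
	if (!jsonEnabled) {
		return body;
	}
	try {
		const json = JSON.parse(body);
		if (typeof json?.message === 'string' && json.message !== '') {
			return json.message;
		}
		return body;
	} catch {
		return body;
	}
}
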

View File

@@ -9,5 +9,4 @@ export enum FeatureKeys {
ANOMALY_DETECTION = 'anomaly_detection',
ONBOARDING_V3 = 'onboarding_v3',
DOT_METRICS_ENABLED = 'dot_metrics_enabled',
BODY_JSON_ENABLED = 'body_json_enabled',
}

View File

@@ -139,8 +139,8 @@ function ChartPreview({
if (startTime && endTime && startTime !== endTime) {
dispatch(
UpdateTimeInterval('custom', [
Number.parseInt(getTimeString(startTime), 10),
Number.parseInt(getTimeString(endTime), 10),
parseInt(getTimeString(startTime), 10),
parseInt(getTimeString(endTime), 10),
]),
);
}

View File

@@ -370,7 +370,7 @@ function FormAlertRules({
// onQueryCategoryChange handles changes to query category
// in state as well as sets additional defaults
const onQueryCategoryChange = (val: EQueryType): void => {
const element = document.querySelector('#top');
const element = document.getElementById('top');
if (element) {
element.scrollIntoView({ behavior: 'smooth' });
}

View File

@@ -8,19 +8,8 @@ import {
OPERATORS,
QUERY_BUILDER_FUNCTIONS,
} from 'constants/antlrQueryConstants';
import { FeatureKeys } from 'constants/features';
import { QueryParams } from 'constants/query';
import { useActiveLog } from 'hooks/logs/useActiveLog';
import { useGetSearchQueryParam } from 'hooks/queryBuilder/useGetSearchQueryParam';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { ICurrentQueryData } from 'hooks/useHandleExplorerTabChange';
import { useNotifications } from 'hooks/useNotifications';
import { ExplorerViews } from 'pages/LogsExplorer/utils';
import { useAppContext } from 'providers/App/App';
import {
BaseAutocompleteData,
DataTypes,
} from 'types/api/queryBuilder/queryAutocompleteResponse';
import { TitleWrapper } from './BodyTitleRenderer.styles';
import { DROPDOWN_KEY } from './constant';
@@ -36,32 +25,17 @@ function BodyTitleRenderer({
parentIsArray = false,
nodeKey,
value,
handleChangeSelectedView,
}: BodyTitleRendererProps): JSX.Element {
const { onAddToQuery } = useActiveLog();
const { stagedQuery, updateQueriesData } = useQueryBuilder();
const { featureFlags } = useAppContext();
const [, setCopy] = useCopyToClipboard();
const { notifications } = useNotifications();
const viewName = useGetSearchQueryParam(QueryParams.viewName) || '';
const cleanedNodeKey = removeObjectFromString(nodeKey);
const isBodyJsonQueryEnabled =
featureFlags?.find((flag) => flag.name === FeatureKeys.BODY_JSON_ENABLED)
?.active || false;
// Group by is supported only for body json query enabled and not for array elements
const isGroupBySupported =
isBodyJsonQueryEnabled && !cleanedNodeKey.includes('[]');
const filterHandler = (isFilterIn: boolean) => (): void => {
if (parentIsArray) {
onAddToQuery(
generateFieldKeyForArray(
cleanedNodeKey,
removeObjectFromString(nodeKey),
getDataTypes(value),
isBodyJsonQueryEnabled,
),
`${value}`,
isFilterIn
@@ -71,7 +45,7 @@ function BodyTitleRenderer({
);
} else {
onAddToQuery(
`body.${cleanedNodeKey}`,
`body.${removeObjectFromString(nodeKey)}`,
`${value}`,
isFilterIn ? OPERATORS['='] : OPERATORS['!='],
getDataTypes(value),
@@ -79,67 +53,10 @@ function BodyTitleRenderer({
}
};
const groupByHandler = useCallback((): void => {
if (!stagedQuery) {
return;
}
const groupByKey = parentIsArray
? generateFieldKeyForArray(
cleanedNodeKey,
getDataTypes(value),
isBodyJsonQueryEnabled,
)
: `body.${cleanedNodeKey}`;
const fieldDataType = getDataTypes(value);
const normalizedDataType: DataTypes | undefined = Object.values(
DataTypes,
).includes(fieldDataType as DataTypes)
? (fieldDataType as DataTypes)
: undefined;
const updatedQuery = updateQueriesData(
stagedQuery,
'queryData',
(item, index) => {
if (index === 0) {
const newGroupByItem: BaseAutocompleteData = {
key: groupByKey,
type: '',
dataType: normalizedDataType,
};
return { ...item, groupBy: [...(item.groupBy || []), newGroupByItem] };
}
return item;
},
);
const queryData: ICurrentQueryData = {
name: viewName,
id: updatedQuery.id,
query: updatedQuery,
};
handleChangeSelectedView?.(ExplorerViews.TIMESERIES, queryData);
}, [
cleanedNodeKey,
handleChangeSelectedView,
isBodyJsonQueryEnabled,
parentIsArray,
stagedQuery,
updateQueriesData,
value,
viewName,
]);
const onClickHandler: MenuProps['onClick'] = (props): void => {
const mapper = {
[DROPDOWN_KEY.FILTER_IN]: filterHandler(true),
[DROPDOWN_KEY.FILTER_OUT]: filterHandler(false),
[DROPDOWN_KEY.GROUP_BY]: groupByHandler,
};
const handler = mapper[props.key];
@@ -159,14 +76,6 @@ function BodyTitleRenderer({
key: DROPDOWN_KEY.FILTER_OUT,
label: `Filter out ${value}`,
},
...(isGroupBySupported
? [
{
key: DROPDOWN_KEY.GROUP_BY,
label: `Group by ${nodeKey}`,
},
]
: []),
],
onClick: onClickHandler,
};
@@ -175,6 +84,7 @@ function BodyTitleRenderer({
(e: React.MouseEvent): void => {
// Prevent tree node expansion/collapse
e.stopPropagation();
const cleanedKey = removeObjectFromString(nodeKey);
let copyText: string;
// Check if value is an object or array
@@ -196,8 +106,8 @@ function BodyTitleRenderer({
if (copyText) {
const notificationMessage = isObject
? `${cleanedNodeKey} object copied to clipboard`
: `${cleanedNodeKey} copied to clipboard`;
? `${cleanedKey} object copied to clipboard`
: `${cleanedKey} copied to clipboard`;
notifications.success({
message: notificationMessage,
@@ -205,7 +115,7 @@ function BodyTitleRenderer({
});
}
},
[cleanedNodeKey, parentIsArray, setCopy, value, notifications],
[nodeKey, parentIsArray, setCopy, value, notifications],
);
return (

View File

@@ -1,4 +1,3 @@
import { ChangeViewFunctionType } from 'container/ExplorerOptions/types';
import { MetricsType } from 'container/MetricsApplication/constant';
import { ILog } from 'types/api/logs/log';
@@ -7,7 +6,6 @@ export interface BodyTitleRendererProps {
nodeKey: string;
value: unknown;
parentIsArray?: boolean;
handleChangeSelectedView?: ChangeViewFunctionType;
}
export type AnyObject = { [key: string]: any };

View File

@@ -2,7 +2,6 @@ import React, { useCallback, useMemo, useState } from 'react';
import { useLocation } from 'react-router-dom';
import { Color } from '@signozhq/design-tokens';
import { Button, Popover, Spin, Tooltip, Tree } from 'antd';
import type { DataNode } from 'antd/es/tree';
import GroupByIcon from 'assets/CustomIcons/GroupByIcon';
import cx from 'classnames';
import CopyClipboardHOC from 'components/Logs/CopyClipboardHOC';
@@ -58,7 +57,7 @@ interface ITableViewActionsProps {
}
// Memoized Tree Component
const MemoizedTree = React.memo<{ treeData: DataNode[] }>(({ treeData }) => (
const MemoizedTree = React.memo<{ treeData: any[] }>(({ treeData }) => (
<Tree
defaultExpandAll
showLine
@@ -75,54 +74,50 @@ const BodyContent: React.FC<{
record: DataType;
bodyHtml: { __html: string };
textToCopy: string;
handleChangeSelectedView?: ChangeViewFunctionType;
}> = React.memo(
({ fieldData, record, bodyHtml, textToCopy, handleChangeSelectedView }) => {
const { isLoading, treeData, error } = useAsyncJSONProcessing(
fieldData.value,
record.field === 'body',
handleChangeSelectedView,
}> = React.memo(({ fieldData, record, bodyHtml, textToCopy }) => {
const { isLoading, treeData, error } = useAsyncJSONProcessing(
fieldData.value,
record.field === 'body',
);
// Show JSON tree if available, otherwise show HTML content
if (record.field === 'body' && treeData) {
return <MemoizedTree treeData={treeData} />;
}
if (record.field === 'body' && isLoading) {
return (
<div style={{ display: 'flex', alignItems: 'center', gap: '8px' }}>
<Spin size="small" />
<span style={{ color: Color.BG_SIENNA_400 }}>Processing JSON...</span>
</div>
);
}
// Show JSON tree if available, otherwise show HTML content
if (record.field === 'body' && treeData) {
return <MemoizedTree treeData={treeData} />;
}
if (record.field === 'body' && error) {
return (
<span
style={{ color: Color.BG_SIENNA_400, whiteSpace: 'pre-wrap', tabSize: 4 }}
>
Error parsing Body JSON
</span>
);
}
if (record.field === 'body' && isLoading) {
return (
<div style={{ display: 'flex', alignItems: 'center', gap: '8px' }}>
<Spin size="small" />
<span style={{ color: Color.BG_SIENNA_400 }}>Processing JSON...</span>
</div>
);
}
if (record.field === 'body' && error) {
return (
if (record.field === 'body') {
return (
<CopyClipboardHOC entityKey="body" textToCopy={textToCopy}>
<span
style={{ color: Color.BG_SIENNA_400, whiteSpace: 'pre-wrap', tabSize: 4 }}
>
Error parsing Body JSON
<span dangerouslySetInnerHTML={bodyHtml} />
</span>
);
}
</CopyClipboardHOC>
);
}
if (record.field === 'body') {
return (
<CopyClipboardHOC entityKey="body" textToCopy={textToCopy}>
<span
style={{ color: Color.BG_SIENNA_400, whiteSpace: 'pre-wrap', tabSize: 4 }}
>
<span dangerouslySetInnerHTML={bodyHtml} />
</span>
</CopyClipboardHOC>
);
}
return null;
},
);
return null;
});
BodyContent.displayName = 'BodyContent';
@@ -324,7 +319,6 @@ export default function TableViewActions(
record={record}
bodyHtml={bodyHtml}
textToCopy={textToCopy}
handleChangeSelectedView={handleChangeSelectedView}
/>
);
@@ -348,7 +342,6 @@ export default function TableViewActions(
fieldData,
bodyHtml,
textToCopy,
handleChangeSelectedView,
formatTimezoneAdjustedTimestamp,
cleanTimestamp,
]);
@@ -362,7 +355,6 @@ export default function TableViewActions(
record={record}
bodyHtml={bodyHtml}
textToCopy={textToCopy}
handleChangeSelectedView={handleChangeSelectedView}
/>
{!isListViewPanel &&
!RESTRICTED_SELECTED_FIELDS.includes(fieldFilterKey) && (

View File

@@ -1,8 +1,5 @@
import { useEffect, useRef, useState } from 'react';
import { FeatureKeys } from 'constants/features';
import { ChangeViewFunctionType } from 'container/ExplorerOptions/types';
import { isEmpty } from 'lodash-es';
import { useAppContext } from 'providers/App/App';
import { jsonToDataNodes, recursiveParseJSON } from '../utils';
@@ -12,7 +9,6 @@ const MAX_BODY_BYTES = 100 * 1024; // 100 KB
const useAsyncJSONProcessing = (
value: string,
shouldProcess: boolean,
handleChangeSelectedView?: ChangeViewFunctionType,
): {
isLoading: boolean;
treeData: any[] | null;
@@ -29,10 +25,6 @@ const useAsyncJSONProcessing = (
});
const processingRef = useRef<boolean>(false);
const { featureFlags } = useAppContext();
const isBodyJsonQueryEnabled =
featureFlags?.find((flag) => flag.name === FeatureKeys.BODY_JSON_ENABLED)
?.active || false;
// eslint-disable-next-line sonarjs/cognitive-complexity
useEffect((): (() => void) => {
@@ -55,10 +47,7 @@ const useAsyncJSONProcessing = (
try {
const parsedBody = recursiveParseJSON(value);
if (!isEmpty(parsedBody)) {
const treeData = jsonToDataNodes(parsedBody, {
isBodyJsonQueryEnabled,
handleChangeSelectedView,
});
const treeData = jsonToDataNodes(parsedBody);
setJsonState({ isLoading: false, treeData, error: null });
} else {
setJsonState({ isLoading: false, treeData: null, error: null });
@@ -84,10 +73,7 @@ const useAsyncJSONProcessing = (
try {
const parsedBody = recursiveParseJSON(value);
if (!isEmpty(parsedBody)) {
const treeData = jsonToDataNodes(parsedBody, {
isBodyJsonQueryEnabled,
handleChangeSelectedView,
});
const treeData = jsonToDataNodes(parsedBody);
setJsonState({ isLoading: false, treeData, error: null });
} else {
setJsonState({ isLoading: false, treeData: null, error: null });
@@ -115,7 +101,7 @@ const useAsyncJSONProcessing = (
return (): void => {
processingRef.current = false;
};
}, [value, shouldProcess, isBodyJsonQueryEnabled, handleChangeSelectedView]);
}, [value, shouldProcess]);
return jsonState;
};

View File

@@ -1,5 +1,4 @@
export const DROPDOWN_KEY = {
FILTER_IN: 'filterIn',
FILTER_OUT: 'filterOut',
GROUP_BY: 'groupBy',
};

View File

@@ -1,6 +1,5 @@
import Convert from 'ansi-to-html';
import type { DataNode } from 'antd/es/tree';
import { ChangeViewFunctionType } from 'container/ExplorerOptions/types';
import { MetricsType } from 'container/MetricsApplication/constant';
import dompurify from 'dompurify';
import { uniqueId } from 'lodash-es';
@@ -35,32 +34,13 @@ export const recursiveParseJSON = (obj: string): Record<string, unknown> => {
}
};
type JsonToDataNodesOptions = {
parentKey?: string;
parentIsArray?: boolean;
isBodyJsonQueryEnabled?: boolean;
handleChangeSelectedView?: ChangeViewFunctionType;
};
type ComputeDataNodeOptions = {
key: string;
valueIsArray: boolean;
value: unknown;
nodeKey: string;
parentIsArray: boolean;
isBodyJsonQueryEnabled?: boolean;
handleChangeSelectedView?: ChangeViewFunctionType;
};
export const computeDataNode = ({
key,
valueIsArray,
value,
nodeKey,
parentIsArray,
isBodyJsonQueryEnabled = false,
handleChangeSelectedView,
}: ComputeDataNodeOptions): DataNode => ({
export const computeDataNode = (
key: string,
valueIsArray: boolean,
value: unknown,
nodeKey: string,
parentIsArray: boolean,
): DataNode => ({
key: uniqueId(),
title: (
<BodyTitleRenderer
@@ -68,30 +48,20 @@ export const computeDataNode = ({
nodeKey={nodeKey}
value={value}
parentIsArray={parentIsArray}
handleChangeSelectedView={handleChangeSelectedView}
/>
),
children: jsonToDataNodes(value as Record<string, unknown>, {
parentKey: valueIsArray
? `${nodeKey}${isBodyJsonQueryEnabled ? '[]' : '[*]'}`
: nodeKey,
parentIsArray: valueIsArray,
isBodyJsonQueryEnabled,
handleChangeSelectedView,
}),
children: jsonToDataNodes(
value as Record<string, unknown>,
valueIsArray ? `${nodeKey}[*]` : nodeKey,
valueIsArray,
),
});
export function jsonToDataNodes(
json: Record<string, unknown>,
options: JsonToDataNodesOptions = {},
parentKey = '',
parentIsArray = false,
): DataNode[] {
const {
parentKey = '',
parentIsArray = false,
isBodyJsonQueryEnabled = false,
handleChangeSelectedView,
} = options;
return Object.entries(json).map(([key, value]) => {
let nodeKey = parentKey || key;
if (parentIsArray) {
@@ -104,15 +74,7 @@ export function jsonToDataNodes(
if (parentIsArray) {
if (typeof value === 'object' && value !== null) {
return computeDataNode({
key,
valueIsArray,
value,
nodeKey,
parentIsArray,
isBodyJsonQueryEnabled,
handleChangeSelectedView,
});
return computeDataNode(key, valueIsArray, value, nodeKey, parentIsArray);
}
return {
@@ -123,31 +85,14 @@ export function jsonToDataNodes(
nodeKey={nodeKey}
value={value}
parentIsArray={parentIsArray}
handleChangeSelectedView={handleChangeSelectedView}
/>
),
children: jsonToDataNodes(
{},
{
parentKey: nodeKey,
parentIsArray: valueIsArray,
isBodyJsonQueryEnabled,
handleChangeSelectedView,
},
),
children: jsonToDataNodes({}, nodeKey, valueIsArray),
};
}
if (typeof value === 'object' && value !== null) {
return computeDataNode({
key,
valueIsArray,
value,
nodeKey,
parentIsArray,
isBodyJsonQueryEnabled,
handleChangeSelectedView,
});
return computeDataNode(key, valueIsArray, value, nodeKey, parentIsArray);
}
return {
key: uniqueId(),
@@ -157,7 +102,6 @@ export function jsonToDataNodes(
nodeKey={nodeKey}
value={value}
parentIsArray={parentIsArray}
handleChangeSelectedView={handleChangeSelectedView}
/>
),
};
@@ -179,7 +123,6 @@ export function flattenObject(obj: AnyObject, prefix = ''): AnyObject {
export const generateFieldKeyForArray = (
fieldKey: string,
dataType: DataTypes,
isBodyJsonQueryEnabled = false,
): string => {
let lastDotIndex = fieldKey.lastIndexOf('.');
let resultNodeKey = fieldKey;
@@ -195,16 +138,6 @@ export const generateFieldKeyForArray = (
newResultNodeKey = resultNodeKey.substring(0, lastDotIndex);
}
}
// When filtering for a value inside an array, the query builder expects the
// last array segment to be referenced without the trailing `[]`.
// Examples:
// - has(body.config.features, 'fast_checkout')
// - has(body.config.features[].items, 'pen')
// - has(body.config.features[].items[].variants, 'ballpen')
if (isBodyJsonQueryEnabled && newResultNodeKey.endsWith('[]')) {
newResultNodeKey = newResultNodeKey.slice(0, -2);
}
return `body.${newResultNodeKey}`;
};
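
The deleted comment in generateFieldKeyForArray explained why the flag-gated branch existed: when filtering on a value inside an array, the query builder's has() expects the last array segment without its trailing '[]'. Isolated as a sketch of just that strip:

// The trailing-'[]' strip performed by the removed branch (flag enabled).
const stripTrailingArray = (key: string): string =>
	key.endsWith('[]') ? key.slice(0, -2) : key;

// stripTrailingArray('config.features[]') -> 'config.features'
// Prefixed with 'body.', this matches the deleted comment's example
// has(body.config.features, 'fast_checkout').
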

View File

@@ -279,7 +279,7 @@ function Explorer(): JSX.Element {
[],
);
const [warning, setWarning] = useState<Warning | undefined>();
const [warning, setWarning] = useState<Warning | undefined>(undefined);
const oneChartPerQueryDisabledTooltip = useMemo(() => {
if (splitedQueries.length <= 1) {
@@ -291,7 +291,7 @@ function Explorer(): JSX.Element {
if (disableOneChartPerQuery) {
return 'One chart per query cannot be disabled for multiple queries with different units.';
}
return;
return undefined;
}, [disableOneChartPerQuery, splitedQueries.length, units.length]);
// Show the y axis unit selector if -

View File

@@ -217,7 +217,7 @@ function Inspect({
);
}
if (inspectMetricsTimeSeries.length === 0) {
if (!inspectMetricsTimeSeries.length) {
return renderFallback(
'inspect-metrics-empty',
<Empty description="No time series found for this metric to inspect." />,

View File

@@ -254,10 +254,10 @@ export function useInspectMetrics(
const valuesMap = new Map<number, number>();
series.values.forEach(({ timestamp, value }) => {
valuesMap.set(timestamp, Number.parseFloat(value));
valuesMap.set(timestamp, parseFloat(value));
});
return timestamps.map((timestamp) => valuesMap.get(timestamp) ?? Number.NaN);
return timestamps.map((timestamp) => valuesMap.get(timestamp) ?? NaN);
});
const rawData = [timestamps, ...timeseriesArray];
@@ -271,7 +271,7 @@ export function useInspectMetrics(
labels.add(label);
});
});
return [...labels];
return Array.from(labels);
}, [inspectMetricsData]);
const reset = useCallback(() => {

View File

@@ -18,7 +18,7 @@ describe('RunQueryBtn', () => {
);
});
it('renders run state and triggers on click', async () => {
test('renders run state and triggers on click', async () => {
const user = userEvent.setup();
const onRun = jest.fn();
const onCancel = jest.fn();
@@ -35,7 +35,7 @@ describe('RunQueryBtn', () => {
expect(onRun).toHaveBeenCalledTimes(1);
});
it('shows cancel state and calls handleCancelQuery', async () => {
test('shows cancel state and calls handleCancelQuery', async () => {
const user = userEvent.setup();
const onRun = jest.fn();
const onCancel = jest.fn();
@@ -51,19 +51,19 @@ describe('RunQueryBtn', () => {
expect(onCancel).toHaveBeenCalledTimes(1);
});
it('disabled when disabled prop is true', () => {
test('disabled when disabled prop is true', () => {
render(<RunQueryBtn disabled />);
expect(screen.getByRole('button', { name: /run query/i })).toBeDisabled();
});
it('disabled when no props provided', () => {
test('disabled when no props provided', () => {
render(<RunQueryBtn />);
expect(
screen.getByRole('button', { name: /run query/i }),
).toBeInTheDocument();
});
it('shows Command + CornerDownLeft on mac', () => {
test('shows Command + CornerDownLeft on mac', () => {
const { container } = render(
<RunQueryBtn
onStageRunQuery={jest.fn()}
@@ -77,7 +77,7 @@ describe('RunQueryBtn', () => {
).toBeInTheDocument();
});
it('shows ChevronUp + CornerDownLeft on non-mac', () => {
test('shows ChevronUp + CornerDownLeft on non-mac', () => {
(getUserOperatingSystem as jest.Mock).mockReturnValue(
UserOperatingSystem.WINDOWS,
);
@@ -95,7 +95,7 @@ describe('RunQueryBtn', () => {
).toBeInTheDocument();
});
it('renders custom label when provided', () => {
test('renders custom label when provided', () => {
render(
<RunQueryBtn
onStageRunQuery={jest.fn()}

View File

@@ -115,8 +115,8 @@ export const useGetQueryRange: UseGetQueryRange = (
const updatedQuery = updateBarStepInterval(
requestData.query,
requestData.start ? requestData.start * 1e3 : Number.parseInt(start, 10) * 1e3,
requestData.end ? requestData.end * 1e3 : Number.parseInt(end, 10) * 1e3,
requestData.start ? requestData.start * 1e3 : parseInt(start, 10) * 1e3,
requestData.end ? requestData.end * 1e3 : parseInt(end, 10) * 1e3,
);
return {

View File

@@ -98,7 +98,7 @@ function LogsExplorer(): JSX.Element {
setIsLoadingQueries(false);
}, [queryClient]);
const [warning, setWarning] = useState<Warning | undefined>();
const [warning, setWarning] = useState<Warning | undefined>(undefined);
const handleChangeSelectedView = useCallback(
(view: ExplorerViews, querySearchParameters?: ICurrentQueryData): void => {

View File

@@ -101,7 +101,7 @@ function TracesExplorer(): JSX.Element {
getExplorerViewFromUrl(searchParams, panelTypesFromUrl),
);
const [warning, setWarning] = useState<Warning | undefined>();
const [warning, setWarning] = useState<Warning | undefined>(undefined);
const [isOpen, setOpen] = useState<boolean>(true);
const defaultQuery = useMemo(

View File

@@ -16,7 +16,7 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
ID: "ListHosts",
Tags: []string{"inframonitoring"},
Summary: "List Hosts for Infra Monitoring",
Description: "Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (cpu, memory, wait, load15, diskUsage) return -1 as a sentinel when no data is available for that field; frontends should render '—' rather than the literal value.",
Description: "Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary.",
Request: new(inframonitoringtypes.PostableHosts),
RequestContentType: "application/json",
Response: new(inframonitoringtypes.Hosts),
@@ -29,43 +29,5 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v2/infra_monitoring/pods", handler.New(
provider.authZ.ViewAccess(provider.infraMonitoringHandler.ListPods),
handler.OpenAPIDef{
ID: "ListPods",
Tags: []string{"inframonitoring"},
Summary: "List Pods for Infra Monitoring",
Description: "Returns a paginated list of Kubernetes pods with key metrics: CPU usage, CPU request/limit utilization, memory working set, memory request/limit utilization, current pod phase (pending/running/succeeded/failed/unknown), and pod age (ms since start time). Each pod includes metadata attributes (namespace, node, workload owner such as deployment/statefulset/daemonset/job/cronjob, cluster). Supports filtering via a filter expression, custom groupBy to aggregate pods by any attribute, ordering by any of the six metrics (cpu, cpu_request, cpu_limit, memory, memory_request, memory_limit), and pagination via offset/limit. The response type is 'list' for the default k8s.pod.uid grouping (each row is one pod with its current phase) or 'grouped_list' for custom groupBy keys (each row aggregates pods in the group with per-phase counts: pendingPodCount, runningPodCount, succeededPodCount, failedPodCount, unknownPodCount derived from each pod's latest phase in the window). Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (podCPU, podCPURequest, podCPULimit, podMemory, podMemoryRequest, podMemoryLimit, podAge) return -1 as a sentinel when no data is available for that field; frontends should render '—' rather than the literal value.",
Request: new(inframonitoringtypes.PostablePods),
RequestContentType: "application/json",
Response: new(inframonitoringtypes.Pods),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodPost).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v2/infra_monitoring/nodes", handler.New(
provider.authZ.ViewAccess(provider.infraMonitoringHandler.ListNodes),
handler.OpenAPIDef{
ID: "ListNodes",
Tags: []string{"inframonitoring"},
Summary: "List Nodes for Infra Monitoring",
Description: "Returns a paginated list of Kubernetes nodes with key metrics: CPU usage, CPU allocatable, memory working set, memory allocatable, and per-group readyNodeCount / notReadyNodeCount derived from each node's latest k8s.node.condition_ready value in the window. Each node includes metadata attributes (k8s.node.uid, k8s.cluster.name). The response type is 'list' for the default k8s.node.name grouping (each row is one node with its current condition string: ready / not_ready / '') or 'grouped_list' for custom groupBy keys (each row aggregates nodes in the group with readyNodeCount and notReadyNodeCount; condition stays empty). Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_allocatable / memory / memory_allocatable, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (nodeCPU, nodeCPUAllocatable, nodeMemory, nodeMemoryAllocatable) return -1 as a sentinel when no data is available for that field; frontends should render '—' rather than the literal value.",
Request: new(inframonitoringtypes.PostableNodes),
RequestContentType: "application/json",
Response: new(inframonitoringtypes.Nodes),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodPost).GetError(); err != nil {
return err
}
return nil
}

View File

@@ -45,51 +45,3 @@ func (h *handler) ListHosts(rw http.ResponseWriter, req *http.Request) {
render.Success(rw, http.StatusOK, result)
}
func (h *handler) ListPods(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
render.Error(rw, err)
return
}
orgID := valuer.MustNewUUID(claims.OrgID)
var parsedReq inframonitoringtypes.PostablePods
if err := binding.JSON.BindBody(req.Body, &parsedReq); err != nil {
render.Error(rw, err)
return
}
result, err := h.module.ListPods(req.Context(), orgID, &parsedReq)
if err != nil {
render.Error(rw, err)
return
}
render.Success(rw, http.StatusOK, result)
}
func (h *handler) ListNodes(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
render.Error(rw, err)
return
}
orgID := valuer.MustNewUUID(claims.OrgID)
var parsedReq inframonitoringtypes.PostableNodes
if err := binding.JSON.BindBody(req.Body, &parsedReq); err != nil {
render.Error(rw, err)
return
}
result, err := h.module.ListNodes(req.Context(), orgID, &parsedReq)
if err != nil {
render.Error(rw, err)
return
}
render.Success(rw, http.StatusOK, result)
}

View File

@@ -14,18 +14,3 @@ type groupHostStatusCounts struct {
Active int
Inactive int
}
// podPhaseCounts holds per-group pod counts bucketed by latest phase in window.
type podPhaseCounts struct {
Pending int
Running int
Succeeded int
Failed int
Unknown int
}
// nodeConditionCounts holds per-group node counts bucketed by latest condition_ready in window.
type nodeConditionCounts struct {
Ready int
NotReady int
}

View File

@@ -159,169 +159,3 @@ func (m *module) ListHosts(ctx context.Context, orgID valuer.UUID, req *inframon
return resp, nil
}
func (m *module) ListPods(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostablePods) (*inframonitoringtypes.Pods, error) {
if err := req.Validate(); err != nil {
return nil, err
}
resp := &inframonitoringtypes.Pods{}
if req.OrderBy == nil {
req.OrderBy = &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: inframonitoringtypes.PodsOrderByCPU,
},
},
Direction: qbtypes.OrderDirectionDesc,
}
}
if len(req.GroupBy) == 0 {
req.GroupBy = []qbtypes.GroupByKey{podUIDGroupByKey}
resp.Type = inframonitoringtypes.ResponseTypeList
} else {
resp.Type = inframonitoringtypes.ResponseTypeGroupedList
}
missingMetrics, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, podsTableMetricNamesList)
if err != nil {
return nil, err
}
if len(missingMetrics) > 0 {
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: missingMetrics}
resp.Records = []inframonitoringtypes.PodRecord{}
resp.Total = 0
return resp, nil
}
if req.End < int64(minFirstReportedUnixMilli) {
resp.EndTimeBeforeRetention = true
resp.Records = []inframonitoringtypes.PodRecord{}
resp.Total = 0
return resp, nil
}
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: []string{}}
metadataMap, err := m.getPodsTableMetadata(ctx, req)
if err != nil {
return nil, err
}
resp.Total = len(metadataMap)
pageGroups, err := m.getTopPodGroups(ctx, orgID, req, metadataMap)
if err != nil {
return nil, err
}
if len(pageGroups) == 0 {
resp.Records = []inframonitoringtypes.PodRecord{}
return resp, nil
}
filterExpr := ""
if req.Filter != nil {
filterExpr = req.Filter.Expression
}
fullQueryReq := buildFullQueryRequest(req.Start, req.End, filterExpr, req.GroupBy, pageGroups, m.newPodsTableListQuery())
queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
if err != nil {
return nil, err
}
phaseCounts, err := m.getPerGroupPodPhaseCounts(ctx, req, pageGroups)
if err != nil {
return nil, err
}
isPodUIDInGroupBy := isKeyInGroupByAttrs(req.GroupBy, podUIDAttrKey)
resp.Records = buildPodRecords(isPodUIDInGroupBy, queryResp, pageGroups, req.GroupBy, metadataMap, phaseCounts, req.End)
resp.Warning = queryResp.Warning
return resp, nil
}
func (m *module) ListNodes(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableNodes) (*inframonitoringtypes.Nodes, error) {
if err := req.Validate(); err != nil {
return nil, err
}
resp := &inframonitoringtypes.Nodes{}
if req.OrderBy == nil {
req.OrderBy = &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: inframonitoringtypes.NodesOrderByCPU,
},
},
Direction: qbtypes.OrderDirectionDesc,
}
}
if len(req.GroupBy) == 0 {
req.GroupBy = []qbtypes.GroupByKey{nodeNameGroupByKey}
resp.Type = inframonitoringtypes.ResponseTypeList
} else {
resp.Type = inframonitoringtypes.ResponseTypeGroupedList
}
missingMetrics, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, nodesTableMetricNamesList)
if err != nil {
return nil, err
}
if len(missingMetrics) > 0 {
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: missingMetrics}
resp.Records = []inframonitoringtypes.NodeRecord{}
resp.Total = 0
return resp, nil
}
if req.End < int64(minFirstReportedUnixMilli) {
resp.EndTimeBeforeRetention = true
resp.Records = []inframonitoringtypes.NodeRecord{}
resp.Total = 0
return resp, nil
}
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: []string{}}
metadataMap, err := m.getNodesTableMetadata(ctx, req)
if err != nil {
return nil, err
}
resp.Total = len(metadataMap)
pageGroups, err := m.getTopNodeGroups(ctx, orgID, req, metadataMap)
if err != nil {
return nil, err
}
if len(pageGroups) == 0 {
resp.Records = []inframonitoringtypes.NodeRecord{}
return resp, nil
}
filterExpr := ""
if req.Filter != nil {
filterExpr = req.Filter.Expression
}
fullQueryReq := buildFullQueryRequest(req.Start, req.End, filterExpr, req.GroupBy, pageGroups, m.newNodesTableListQuery())
queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
if err != nil {
return nil, err
}
conditionCounts, err := m.getPerGroupNodeConditionCounts(ctx, req, pageGroups)
if err != nil {
return nil, err
}
isNodeNameInGroupBy := isKeyInGroupByAttrs(req.GroupBy, nodeNameAttrKey)
resp.Records = buildNodeRecords(isNodeNameInGroupBy, queryResp, pageGroups, req.GroupBy, metadataMap, conditionCounts)
resp.Warning = queryResp.Warning
return resp, nil
}


@@ -1,299 +0,0 @@
package implinframonitoring
import (
"context"
"fmt"
"slices"
"strings"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/huandu/go-sqlbuilder"
)
// buildNodeRecords assembles the page records. Condition counts come from
// conditionCounts in both modes. In list mode (isNodeNameInGroupBy=true) each
// group is one node, so exactly one count is 1; Condition is derived from
// which one. In grouped_list mode Condition stays NodeConditionNone.
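// Illustrative (hypothetical values): in list mode a group for node
// "worker-1" carrying conditionCounts{Ready: 1} maps to NodeConditionReady,
// while a grouped_list group carrying {Ready: 4, NotReady: 1} keeps
// NodeConditionNone and reports only the two counts.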
func buildNodeRecords(
isNodeNameInGroupBy bool,
resp *qbtypes.QueryRangeResponse,
pageGroups []map[string]string,
groupBy []qbtypes.GroupByKey,
metadataMap map[string]map[string]string,
conditionCounts map[string]nodeConditionCounts,
) []inframonitoringtypes.NodeRecord {
metricsMap := parseFullQueryResponse(resp, groupBy)
records := make([]inframonitoringtypes.NodeRecord, 0, len(pageGroups))
for _, labels := range pageGroups {
compositeKey := compositeKeyFromLabels(labels, groupBy)
nodeName := labels[nodeNameAttrKey]
record := inframonitoringtypes.NodeRecord{ // initialize with default values
NodeName: nodeName,
Condition: inframonitoringtypes.NodeConditionNone,
NodeCPU: -1,
NodeCPUAllocatable: -1,
NodeMemory: -1,
NodeMemoryAllocatable: -1,
Meta: map[string]any{},
}
if metrics, ok := metricsMap[compositeKey]; ok {
if v, exists := metrics["A"]; exists {
record.NodeCPU = v
}
if v, exists := metrics["B"]; exists {
record.NodeCPUAllocatable = v
}
if v, exists := metrics["C"]; exists {
record.NodeMemory = v
}
if v, exists := metrics["D"]; exists {
record.NodeMemoryAllocatable = v
}
}
if conditionCountsForGroup, ok := conditionCounts[compositeKey]; ok {
record.ReadyNodesCount = conditionCountsForGroup.Ready
record.NotReadyNodesCount = conditionCountsForGroup.NotReady
// In list mode each group is one node; the count==1 bucket identifies the condition.
if isNodeNameInGroupBy {
switch {
case conditionCountsForGroup.Ready == 1:
record.Condition = inframonitoringtypes.NodeConditionReady
case conditionCountsForGroup.NotReady == 1:
record.Condition = inframonitoringtypes.NodeConditionNotReady
}
}
}
if attrs, ok := metadataMap[compositeKey]; ok {
for k, v := range attrs {
record.Meta[k] = v
}
}
records = append(records, record)
}
return records
}
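// getTopNodeGroups ranks groups using only the composite query that backs
// req.OrderBy, then pages the sorted result; paginateWithBackfill is assumed
// to top the page up from metadataMap for groups without metric datapoints.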
func (m *module) getTopNodeGroups(
ctx context.Context,
orgID valuer.UUID,
req *inframonitoringtypes.PostableNodes,
metadataMap map[string]map[string]string,
) ([]map[string]string, error) {
orderByKey := req.OrderBy.Key.Name
queryNamesForOrderBy := orderByToNodesQueryNames[orderByKey]
rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]
topReq := &qbtypes.QueryRangeRequest{
Start: uint64(req.Start),
End: uint64(req.End),
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
},
}
for _, envelope := range m.newNodesTableListQuery().CompositeQuery.Queries {
if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
continue
}
copied := envelope
if copied.Type == qbtypes.QueryTypeBuilder {
existingExpr := ""
if f := copied.GetFilter(); f != nil {
existingExpr = f.Expression
}
reqFilterExpr := ""
if req.Filter != nil {
reqFilterExpr = req.Filter.Expression
}
merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
copied.SetFilter(&qbtypes.Filter{Expression: merged})
copied.SetGroupBy(req.GroupBy)
}
topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
}
resp, err := m.querier.QueryRange(ctx, orgID, topReq)
if err != nil {
return nil, err
}
allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
}
func (m *module) getNodesTableMetadata(ctx context.Context, req *inframonitoringtypes.PostableNodes) (map[string]map[string]string, error) {
var nonGroupByAttrs []string
for _, key := range nodeAttrKeysForMetadata {
if !isKeyInGroupByAttrs(req.GroupBy, key) {
nonGroupByAttrs = append(nonGroupByAttrs, key)
}
}
return m.getMetadata(ctx, nodesTableMetricNamesList, req.GroupBy, nonGroupByAttrs, req.Filter, req.Start, req.End)
}
// getPerGroupNodeConditionCounts computes per-group node counts bucketed by each
// node's latest condition_ready value (0 / 1) in the requested window.
// Pipeline:
//
// timeSeriesFPs: fp ↔ (node_name, groupBy cols) from the time_series table.
// User filter + page-groups filter applied here.
// latestConditionPerNode: INNER JOIN samples × timeSeriesFPs, collapsed to
// the latest condition value per node via argMax(value, unix_milli).
// countNodesPerCondition: per-group uniqExactIf into ready/not_ready buckets.
//
// Groups absent from the result map have implicit zero counts (caller default).
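//
// A sketch of the SQL this assembles, assuming a single hypothetical groupBy
// key "k8s.cluster.name" (table names vary with the window; time-range
// predicates and bound args are elided):
//
//	WITH time_series_fps AS (
//	    SELECT fingerprint,
//	        JSONExtractString(labels, 'k8s.node.name') AS node_name,
//	        JSONExtractString(labels, 'k8s.cluster.name') AS "k8s.cluster.name"
//	    FROM signoz_metrics.time_series_v4
//	    WHERE metric_name = 'k8s.node.condition_ready'
//	    GROUP BY fingerprint, node_name, "k8s.cluster.name"
//	), latest_condition_per_node AS (
//	    SELECT tsfp.node_name AS node_name,
//	        tsfp."k8s.cluster.name" AS "k8s.cluster.name",
//	        argMax(samples.value, samples.unix_milli) AS condition_value
//	    FROM signoz_metrics.distributed_samples_v4 AS samples
//	    INNER JOIN time_series_fps AS tsfp ON samples.fingerprint = tsfp.fingerprint
//	    WHERE samples.metric_name = 'k8s.node.condition_ready' AND tsfp.node_name != ''
//	    GROUP BY node_name, "k8s.cluster.name"
//	)
//	SELECT "k8s.cluster.name",
//	    uniqExactIf(node_name, condition_value = 1) AS ready_count,
//	    uniqExactIf(node_name, condition_value = 0) AS not_ready_count
//	FROM latest_condition_per_node
//	GROUP BY "k8s.cluster.name"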
func (m *module) getPerGroupNodeConditionCounts(
ctx context.Context,
req *inframonitoringtypes.PostableNodes,
pageGroups []map[string]string,
) (map[string]nodeConditionCounts, error) {
if len(pageGroups) == 0 || len(req.GroupBy) == 0 {
return map[string]nodeConditionCounts{}, nil
}
// Merged filter expression (user filter + page-groups IN clauses).
reqFilterExpr := ""
if req.Filter != nil {
reqFilterExpr = req.Filter.Expression
}
pageGroupsFilterExpr := buildPageGroupsFilterExpr(pageGroups)
filterExpr := mergeFilterExpressions(reqFilterExpr, pageGroupsFilterExpr)
// Resolve tables. Same convention as pods.
adjustedStart, adjustedEnd, _, localTimeSeriesTable := telemetrymetrics.WhichTSTableToUse(
uint64(req.Start), uint64(req.End), nil,
)
samplesTable := telemetrymetrics.WhichSamplesTableToUse(
uint64(req.Start), uint64(req.End),
metrictypes.UnspecifiedType, metrictypes.TimeAggregationUnspecified, nil,
)
valueCol := telemetrymetrics.ValueColumnForSamplesTable(samplesTable)
// ----- timeSeriesFPs -----
timeSeriesFPs := sqlbuilder.NewSelectBuilder()
timeSeriesFPsSelectCols := []string{
"fingerprint",
fmt.Sprintf("JSONExtractString(labels, %s) AS node_name", timeSeriesFPs.Var(nodeNameAttrKey)),
}
for _, key := range req.GroupBy {
timeSeriesFPsSelectCols = append(timeSeriesFPsSelectCols,
fmt.Sprintf("JSONExtractString(labels, %s) AS %s", timeSeriesFPs.Var(key.Name), quoteIdentifier(key.Name)),
)
}
timeSeriesFPs.Select(timeSeriesFPsSelectCols...)
timeSeriesFPs.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, localTimeSeriesTable))
timeSeriesFPs.Where(
timeSeriesFPs.E("metric_name", nodeConditionMetricName),
timeSeriesFPs.GE("unix_milli", adjustedStart),
timeSeriesFPs.L("unix_milli", adjustedEnd),
)
if filterExpr != "" {
filterClause, err := m.buildFilterClause(ctx, &qbtypes.Filter{Expression: filterExpr}, req.Start, req.End)
if err != nil {
return nil, err
}
if filterClause != nil {
timeSeriesFPs.AddWhereClause(filterClause)
}
}
timeSeriesFPsGroupBy := []string{"fingerprint", "node_name"}
for _, key := range req.GroupBy {
timeSeriesFPsGroupBy = append(timeSeriesFPsGroupBy, quoteIdentifier(key.Name))
}
timeSeriesFPs.GroupBy(timeSeriesFPsGroupBy...)
timeSeriesFPsSQL, timeSeriesFPsArgs := timeSeriesFPs.BuildWithFlavor(sqlbuilder.ClickHouse)
// ----- latestConditionPerNode -----
latestConditionPerNode := sqlbuilder.NewSelectBuilder()
latestConditionPerNodeSelectCols := []string{"tsfp.node_name AS node_name"}
latestConditionPerNodeGroupBy := []string{"node_name"}
for _, key := range req.GroupBy {
col := quoteIdentifier(key.Name)
latestConditionPerNodeSelectCols = append(latestConditionPerNodeSelectCols, fmt.Sprintf("tsfp.%s AS %s", col, col))
latestConditionPerNodeGroupBy = append(latestConditionPerNodeGroupBy, col)
}
latestConditionPerNodeSelectCols = append(latestConditionPerNodeSelectCols,
fmt.Sprintf("argMax(samples.%s, samples.unix_milli) AS condition_value", valueCol),
)
latestConditionPerNode.Select(latestConditionPerNodeSelectCols...)
latestConditionPerNode.From(fmt.Sprintf(
"%s.%s AS samples INNER JOIN time_series_fps AS tsfp ON samples.fingerprint = tsfp.fingerprint",
telemetrymetrics.DBName, samplesTable,
))
latestConditionPerNode.Where(
latestConditionPerNode.E("samples.metric_name", nodeConditionMetricName),
latestConditionPerNode.GE("samples.unix_milli", req.Start),
latestConditionPerNode.L("samples.unix_milli", req.End),
"tsfp.node_name != ''",
)
latestConditionPerNode.GroupBy(latestConditionPerNodeGroupBy...)
latestConditionPerNodeSQL, latestConditionPerNodeArgs := latestConditionPerNode.BuildWithFlavor(sqlbuilder.ClickHouse)
// ----- countNodesPerCondition (outer SELECT) -----
countNodesPerConditionSelectCols := make([]string, 0, len(req.GroupBy)+2)
countNodesPerConditionGroupBy := make([]string, 0, len(req.GroupBy))
for _, key := range req.GroupBy {
col := quoteIdentifier(key.Name)
countNodesPerConditionSelectCols = append(countNodesPerConditionSelectCols, col)
countNodesPerConditionGroupBy = append(countNodesPerConditionGroupBy, col)
}
countNodesPerConditionSelectCols = append(countNodesPerConditionSelectCols,
fmt.Sprintf("uniqExactIf(node_name, condition_value = %d) AS ready_count", inframonitoringtypes.NodeConditionNumReady),
fmt.Sprintf("uniqExactIf(node_name, condition_value = %d) AS not_ready_count", inframonitoringtypes.NodeConditionNumNotReady),
)
countNodesPerConditionSQL := fmt.Sprintf(
"SELECT %s FROM latest_condition_per_node GROUP BY %s",
strings.Join(countNodesPerConditionSelectCols, ", "),
strings.Join(countNodesPerConditionGroupBy, ", "),
)
// Combine CTEs + outer.
cteFragments := []string{
fmt.Sprintf("time_series_fps AS (%s)", timeSeriesFPsSQL),
fmt.Sprintf("latest_condition_per_node AS (%s)", latestConditionPerNodeSQL),
}
finalSQL := querybuilder.CombineCTEs(cteFragments) + countNodesPerConditionSQL
finalArgs := querybuilder.PrependArgs([][]any{timeSeriesFPsArgs, latestConditionPerNodeArgs}, nil)
rows, err := m.telemetryStore.ClickhouseDB().Query(ctx, finalSQL, finalArgs...)
if err != nil {
return nil, err
}
defer rows.Close()
result := make(map[string]nodeConditionCounts)
for rows.Next() {
groupVals := make([]string, len(req.GroupBy))
scanPtrs := make([]any, 0, len(req.GroupBy)+2)
for i := range groupVals {
scanPtrs = append(scanPtrs, &groupVals[i])
}
var ready, notReady uint64
scanPtrs = append(scanPtrs, &ready, &notReady)
if err := rows.Scan(scanPtrs...); err != nil {
return nil, err
}
result[compositeKeyFromList(groupVals)] = nodeConditionCounts{
Ready: int(ready),
NotReady: int(notReady),
}
}
if err := rows.Err(); err != nil {
return nil, err
}
return result, nil
}


@@ -1,136 +0,0 @@
package implinframonitoring
import (
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
const (
nodeNameAttrKey = "k8s.node.name"
nodeConditionMetricName = "k8s.node.condition_ready"
)
var nodeNameGroupByKey = qbtypes.GroupByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: nodeNameAttrKey,
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
}
// nodesTableMetricNamesList drives the existence/retention check.
// Includes condition_ready so the response short-circuits cleanly when a
// cluster doesn't ship the metric — even though condition_ready isn't part
// of the QB composite query (it's queried separately via getPerGroupNodeConditionCounts).
var nodesTableMetricNamesList = []string{
"k8s.node.cpu.usage",
"k8s.node.allocatable_cpu",
"k8s.node.memory.working_set",
"k8s.node.allocatable_memory",
nodeConditionMetricName,
}
var nodeAttrKeysForMetadata = []string{
"k8s.node.name",
"k8s.node.uid",
"k8s.cluster.name",
}
var orderByToNodesQueryNames = map[string][]string{
inframonitoringtypes.NodesOrderByCPU: {"A"},
inframonitoringtypes.NodesOrderByCPUAllocatable: {"B"},
inframonitoringtypes.NodesOrderByMemory: {"C"},
inframonitoringtypes.NodesOrderByMemoryAllocatable: {"D"},
}
// newNodesTableListQuery builds the composite QB v5 request for the nodes list.
// Node condition is derived separately via getPerGroupNodeConditionCounts (works
// for both list and grouped_list modes), so no condition query is included here.
func (m *module) newNodesTableListQuery() *qbtypes.QueryRangeRequest {
queries := []qbtypes.QueryEnvelope{
// Query A: CPU usage
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "A",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.node.cpu.usage",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{nodeNameGroupByKey},
Disabled: false,
},
},
// Query B: CPU allocatable.
// TimeAggregationLatest is the closest v5 equivalent of v1's AnyLast;
// allocatable values change rarely so divergence in practice is negligible.
// TODO(nikhilmantri0902): switch to AnyLast once v5 supports it.
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "B",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.node.allocatable_cpu",
TimeAggregation: metrictypes.TimeAggregationLatest,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{nodeNameGroupByKey},
Disabled: false,
},
},
// Query C: Memory working set
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "C",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.node.memory.working_set",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{nodeNameGroupByKey},
Disabled: false,
},
},
// Query D: Memory allocatable. Same Latest caveat as Query B.
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "D",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.node.allocatable_memory",
TimeAggregation: metrictypes.TimeAggregationLatest,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{nodeNameGroupByKey},
Disabled: false,
},
},
}
return &qbtypes.QueryRangeRequest{
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: queries,
},
}
}


@@ -1,334 +0,0 @@
package implinframonitoring
import (
"context"
"fmt"
"slices"
"strings"
"time"
"github.com/SigNoz/signoz/pkg/querybuilder"
"github.com/SigNoz/signoz/pkg/telemetrymetrics"
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/huandu/go-sqlbuilder"
)
// buildPodRecords assembles the page records. Phase counts come from
// phaseCounts in both modes. In list mode (isPodUIDInGroupBy=true) each
// group is one pod, so exactly one count is 1; PodPhase is derived from
// which one. In grouped_list mode PodPhase stays PodPhaseNone.
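// Illustrative (hypothetical values): in list mode a group keyed by one pod
// UID carrying phaseCounts{Running: 1} maps to PodPhaseRunning; a
// grouped_list group for a namespace carrying {Pending: 2, Running: 7,
// Failed: 1} keeps PodPhaseNone and reports only the per-phase counts.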
func buildPodRecords(
isPodUIDInGroupBy bool,
resp *qbtypes.QueryRangeResponse,
pageGroups []map[string]string,
groupBy []qbtypes.GroupByKey,
metadataMap map[string]map[string]string,
phaseCounts map[string]podPhaseCounts,
reqEnd int64,
) []inframonitoringtypes.PodRecord {
metricsMap := parseFullQueryResponse(resp, groupBy)
records := make([]inframonitoringtypes.PodRecord, 0, len(pageGroups))
for _, labels := range pageGroups {
compositeKey := compositeKeyFromLabels(labels, groupBy)
podUID := labels[podUIDAttrKey]
record := inframonitoringtypes.PodRecord{ // initialize with default values
PodUID: podUID,
PodPhase: inframonitoringtypes.PodPhaseNone,
PodCPU: -1,
PodCPURequest: -1,
PodCPULimit: -1,
PodMemory: -1,
PodMemoryRequest: -1,
PodMemoryLimit: -1,
PodAge: -1,
Meta: map[string]any{},
}
if metrics, ok := metricsMap[compositeKey]; ok {
if v, exists := metrics["A"]; exists {
record.PodCPU = v
}
if v, exists := metrics["B"]; exists {
record.PodCPURequest = v
}
if v, exists := metrics["C"]; exists {
record.PodCPULimit = v
}
if v, exists := metrics["D"]; exists {
record.PodMemory = v
}
if v, exists := metrics["E"]; exists {
record.PodMemoryRequest = v
}
if v, exists := metrics["F"]; exists {
record.PodMemoryLimit = v
}
}
if phaseCountsForGroup, ok := phaseCounts[compositeKey]; ok {
record.PendingPodCount = phaseCountsForGroup.Pending
record.RunningPodCount = phaseCountsForGroup.Running
record.SucceededPodCount = phaseCountsForGroup.Succeeded
record.FailedPodCount = phaseCountsForGroup.Failed
record.UnknownPodCount = phaseCountsForGroup.Unknown
// In list mode each group is one pod; the count==1 bucket identifies the phase.
if isPodUIDInGroupBy {
switch {
case phaseCountsForGroup.Pending == 1:
record.PodPhase = inframonitoringtypes.PodPhasePending
case phaseCountsForGroup.Running == 1:
record.PodPhase = inframonitoringtypes.PodPhaseRunning
case phaseCountsForGroup.Succeeded == 1:
record.PodPhase = inframonitoringtypes.PodPhaseSucceeded
case phaseCountsForGroup.Failed == 1:
record.PodPhase = inframonitoringtypes.PodPhaseFailed
case phaseCountsForGroup.Unknown == 1:
record.PodPhase = inframonitoringtypes.PodPhaseUnknown
}
}
}
if attrs, ok := metadataMap[compositeKey]; ok && isPodUIDInGroupBy {
// Derive age only when the pod UID is part of the group-by: otherwise a group
// may span multiple pods, and a single podAge for the group wouldn't make sense.
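// Hypothetical example: a start_time parsing to t ms with reqEnd = t + 3_600_000
// yields PodAge = 3_600_000, i.e. one hour expressed in milliseconds.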
if startTimeStr, exists := attrs[podStartTimeAttrKey]; exists && startTimeStr != "" {
if t, err := time.Parse(time.RFC3339, startTimeStr); err == nil {
startTimeMs := t.UnixMilli()
if startTimeMs > 0 {
record.PodAge = reqEnd - startTimeMs
}
}
}
for k, v := range attrs {
record.Meta[k] = v
}
}
records = append(records, record)
}
return records
}
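// getTopPodGroups mirrors getTopNodeGroups: run just the composite query
// behind req.OrderBy, sort the groups, and page with metadata backfill.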
func (m *module) getTopPodGroups(
ctx context.Context,
orgID valuer.UUID,
req *inframonitoringtypes.PostablePods,
metadataMap map[string]map[string]string,
) ([]map[string]string, error) {
orderByKey := req.OrderBy.Key.Name
queryNamesForOrderBy := orderByToPodsQueryNames[orderByKey]
rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]
topReq := &qbtypes.QueryRangeRequest{
Start: uint64(req.Start),
End: uint64(req.End),
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
},
}
for _, envelope := range m.newPodsTableListQuery().CompositeQuery.Queries {
if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
continue
}
copied := envelope
if copied.Type == qbtypes.QueryTypeBuilder {
existingExpr := ""
if f := copied.GetFilter(); f != nil {
existingExpr = f.Expression
}
reqFilterExpr := ""
if req.Filter != nil {
reqFilterExpr = req.Filter.Expression
}
merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
copied.SetFilter(&qbtypes.Filter{Expression: merged})
copied.SetGroupBy(req.GroupBy)
}
topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
}
resp, err := m.querier.QueryRange(ctx, orgID, topReq)
if err != nil {
return nil, err
}
allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
}
func (m *module) getPodsTableMetadata(ctx context.Context, req *inframonitoringtypes.PostablePods) (map[string]map[string]string, error) {
var nonGroupByAttrs []string
for _, key := range podAttrKeysForMetadata {
if !isKeyInGroupByAttrs(req.GroupBy, key) {
nonGroupByAttrs = append(nonGroupByAttrs, key)
}
}
return m.getMetadata(ctx, podsTableMetricNamesList, req.GroupBy, nonGroupByAttrs, req.Filter, req.Start, req.End)
}
// getPerGroupPodPhaseCounts computes per-group pod counts bucketed by each
// pod's latest phase in the requested window.
// Pipeline:
//
// timeSeriesFPs: fp ↔ (pod_uid, groupBy cols) from the time_series table.
// User filter + page-groups filter applied here.
// latestPhasePerPod: INNER JOIN samples × timeSeriesFPs, collapsed to
// the latest phase per pod via argMax(value, unix_milli).
// countPodsPerPhase: per-group uniqExactIf into 5 phase buckets.
//
// Groups absent from the result map have implicit zero counts (caller default).
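//
// Result shape (hypothetical values) for groupBy = ["k8s.namespace.name"]:
// result["payments"] = podPhaseCounts{Pending: 1, Running: 12}.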
func (m *module) getPerGroupPodPhaseCounts(
ctx context.Context,
req *inframonitoringtypes.PostablePods,
pageGroups []map[string]string,
) (map[string]podPhaseCounts, error) {
if len(pageGroups) == 0 || len(req.GroupBy) == 0 {
return map[string]podPhaseCounts{}, nil
}
// Merged filter expression (user filter + page-groups IN clauses).
reqFilterExpr := ""
if req.Filter != nil {
reqFilterExpr = req.Filter.Expression
}
pageGroupsFilterExpr := buildPageGroupsFilterExpr(pageGroups)
filterExpr := mergeFilterExpressions(reqFilterExpr, pageGroupsFilterExpr)
// Resolve tables. Same convention as hosts (distributed names from helpers).
adjustedStart, adjustedEnd, _, localTimeSeriesTable := telemetrymetrics.WhichTSTableToUse(
uint64(req.Start), uint64(req.End), nil,
)
samplesTable := telemetrymetrics.WhichSamplesTableToUse(
uint64(req.Start), uint64(req.End),
metrictypes.UnspecifiedType, metrictypes.TimeAggregationUnspecified, nil,
)
valueCol := telemetrymetrics.ValueColumnForSamplesTable(samplesTable)
// ----- timeSeriesFPs -----
timeSeriesFPs := sqlbuilder.NewSelectBuilder()
timeSeriesFPsSelectCols := []string{
"fingerprint",
fmt.Sprintf("JSONExtractString(labels, %s) AS pod_uid", timeSeriesFPs.Var(podUIDAttrKey)),
}
for _, key := range req.GroupBy {
timeSeriesFPsSelectCols = append(timeSeriesFPsSelectCols,
fmt.Sprintf("JSONExtractString(labels, %s) AS %s", timeSeriesFPs.Var(key.Name), quoteIdentifier(key.Name)),
)
}
timeSeriesFPs.Select(timeSeriesFPsSelectCols...)
timeSeriesFPs.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, localTimeSeriesTable))
timeSeriesFPs.Where(
timeSeriesFPs.E("metric_name", podPhaseMetricName),
timeSeriesFPs.GE("unix_milli", adjustedStart),
timeSeriesFPs.L("unix_milli", adjustedEnd),
)
if filterExpr != "" {
filterClause, err := m.buildFilterClause(ctx, &qbtypes.Filter{Expression: filterExpr}, req.Start, req.End)
if err != nil {
return nil, err
}
if filterClause != nil {
timeSeriesFPs.AddWhereClause(filterClause)
}
}
timeSeriesFPsGroupBy := []string{"fingerprint", "pod_uid"}
for _, key := range req.GroupBy {
timeSeriesFPsGroupBy = append(timeSeriesFPsGroupBy, quoteIdentifier(key.Name))
}
timeSeriesFPs.GroupBy(timeSeriesFPsGroupBy...)
timeSeriesFPsSQL, timeSeriesFPsArgs := timeSeriesFPs.BuildWithFlavor(sqlbuilder.ClickHouse)
latestPhasePerPod := sqlbuilder.NewSelectBuilder()
latestPhasePerPodSelectCols := []string{"tsfp.pod_uid AS pod_uid"}
latestPhasePerPodGroupBy := []string{"pod_uid"}
for _, key := range req.GroupBy {
col := quoteIdentifier(key.Name)
latestPhasePerPodSelectCols = append(latestPhasePerPodSelectCols, fmt.Sprintf("tsfp.%s AS %s", col, col))
latestPhasePerPodGroupBy = append(latestPhasePerPodGroupBy, col)
}
latestPhasePerPodSelectCols = append(latestPhasePerPodSelectCols,
fmt.Sprintf("argMax(samples.%s, samples.unix_milli) AS phase_value", valueCol),
)
latestPhasePerPod.Select(latestPhasePerPodSelectCols...)
latestPhasePerPod.From(fmt.Sprintf(
"%s.%s AS samples INNER JOIN time_series_fps AS tsfp ON samples.fingerprint = tsfp.fingerprint",
telemetrymetrics.DBName, samplesTable,
))
latestPhasePerPod.Where(
latestPhasePerPod.E("samples.metric_name", podPhaseMetricName),
latestPhasePerPod.GE("samples.unix_milli", req.Start),
latestPhasePerPod.L("samples.unix_milli", req.End),
"tsfp.pod_uid != ''",
)
latestPhasePerPod.GroupBy(latestPhasePerPodGroupBy...)
latestPhasePerPodSQL, latestPhasePerPodArgs := latestPhasePerPod.BuildWithFlavor(sqlbuilder.ClickHouse)
// ----- countPodsPerPhase (outer SELECT) -----
countPodsPerPhaseSelectCols := make([]string, 0, len(req.GroupBy)+5)
countPodsPerPhaseGroupBy := make([]string, 0, len(req.GroupBy))
for _, key := range req.GroupBy {
col := quoteIdentifier(key.Name)
countPodsPerPhaseSelectCols = append(countPodsPerPhaseSelectCols, col)
countPodsPerPhaseGroupBy = append(countPodsPerPhaseGroupBy, col)
}
countPodsPerPhaseSelectCols = append(countPodsPerPhaseSelectCols,
fmt.Sprintf("uniqExactIf(pod_uid, phase_value = %d) AS pending_count", inframonitoringtypes.PodPhaseNumPending),
fmt.Sprintf("uniqExactIf(pod_uid, phase_value = %d) AS running_count", inframonitoringtypes.PodPhaseNumRunning),
fmt.Sprintf("uniqExactIf(pod_uid, phase_value = %d) AS succeeded_count", inframonitoringtypes.PodPhaseNumSucceeded),
fmt.Sprintf("uniqExactIf(pod_uid, phase_value = %d) AS failed_count", inframonitoringtypes.PodPhaseNumFailed),
fmt.Sprintf("uniqExactIf(pod_uid, phase_value = %d) AS unknown_count", inframonitoringtypes.PodPhaseNumUnknown),
)
countPodsPerPhaseSQL := fmt.Sprintf(
"SELECT %s FROM latest_phase_per_pod GROUP BY %s",
strings.Join(countPodsPerPhaseSelectCols, ", "),
strings.Join(countPodsPerPhaseGroupBy, ", "),
)
// Combine CTEs + outer.
cteFragments := []string{
fmt.Sprintf("time_series_fps AS (%s)", timeSeriesFPsSQL),
fmt.Sprintf("latest_phase_per_pod AS (%s)", latestPhasePerPodSQL),
}
finalSQL := querybuilder.CombineCTEs(cteFragments) + countPodsPerPhaseSQL
finalArgs := querybuilder.PrependArgs([][]any{timeSeriesFPsArgs, latestPhasePerPodArgs}, nil)
rows, err := m.telemetryStore.ClickhouseDB().Query(ctx, finalSQL, finalArgs...)
if err != nil {
return nil, err
}
defer rows.Close()
result := make(map[string]podPhaseCounts)
for rows.Next() {
groupVals := make([]string, len(req.GroupBy))
scanPtrs := make([]any, 0, len(req.GroupBy)+5)
for i := range groupVals {
scanPtrs = append(scanPtrs, &groupVals[i])
}
var pending, running, succeeded, failed, unknown uint64
scanPtrs = append(scanPtrs, &pending, &running, &succeeded, &failed, &unknown)
if err := rows.Scan(scanPtrs...); err != nil {
return nil, err
}
result[compositeKeyFromList(groupVals)] = podPhaseCounts{
Pending: int(pending),
Running: int(running),
Succeeded: int(succeeded),
Failed: int(failed),
Unknown: int(unknown),
}
}
if err := rows.Err(); err != nil {
return nil, err
}
return result, nil
}


@@ -1,178 +0,0 @@
package implinframonitoring
import (
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
const (
podUIDAttrKey = "k8s.pod.uid"
podStartTimeAttrKey = "k8s.pod.start_time"
podPhaseMetricName = "k8s.pod.phase"
)
var podUIDGroupByKey = qbtypes.GroupByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: podUIDAttrKey,
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
}
var podsTableMetricNamesList = []string{
"k8s.pod.cpu.usage",
"k8s.pod.cpu_request_utilization",
"k8s.pod.cpu_limit_utilization",
"k8s.pod.memory.working_set",
"k8s.pod.memory_request_utilization",
"k8s.pod.memory_limit_utilization",
"k8s.pod.phase",
}
var podAttrKeysForMetadata = []string{
"k8s.pod.uid",
"k8s.pod.name",
"k8s.namespace.name",
"k8s.node.name",
"k8s.deployment.name",
"k8s.statefulset.name",
"k8s.daemonset.name",
"k8s.job.name",
"k8s.cronjob.name",
"k8s.cluster.name",
"k8s.pod.start_time",
}
var orderByToPodsQueryNames = map[string][]string{
inframonitoringtypes.PodsOrderByCPU: {"A"},
inframonitoringtypes.PodsOrderByCPURequest: {"B"},
inframonitoringtypes.PodsOrderByCPULimit: {"C"},
inframonitoringtypes.PodsOrderByMemory: {"D"},
inframonitoringtypes.PodsOrderByMemoryRequest: {"E"},
inframonitoringtypes.PodsOrderByMemoryLimit: {"F"},
}
// newPodsTableListQuery builds the composite QB v5 request for the pods list.
// Pod phase is derived separately via getPerGroupPodPhaseCounts (works for both
// list and grouped_list modes), so no phase query is included here.
func (m *module) newPodsTableListQuery() *qbtypes.QueryRangeRequest {
queries := []qbtypes.QueryEnvelope{
// Query A: CPU usage
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "A",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.cpu.usage",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
// Query B: CPU request utilization
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "B",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.cpu_request_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
// Query C: CPU limit utilization
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "C",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.cpu_limit_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
// Query D: Memory working set
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "D",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.memory.working_set",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
// Query E: Memory request utilization
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "E",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.memory_request_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
// Query F: Memory limit utilization
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "F",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.memory_limit_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
}
return &qbtypes.QueryRangeRequest{
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: queries,
},
}
}


@@ -10,12 +10,8 @@ import (
type Handler interface {
ListHosts(http.ResponseWriter, *http.Request)
ListPods(http.ResponseWriter, *http.Request)
ListNodes(http.ResponseWriter, *http.Request)
}
type Module interface {
ListHosts(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableHosts) (*inframonitoringtypes.Hosts, error)
ListPods(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostablePods) (*inframonitoringtypes.Pods, error)
ListNodes(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableNodes) (*inframonitoringtypes.Nodes, error)
}


@@ -124,17 +124,6 @@ func CountExpressionForSamplesTable(tableName string) string {
return "sum(count)"
}
// ValueColumnForSamplesTable returns the column name holding the sample value:
// "last" for the 5m/30m aggregated tables, "value" otherwise.
// Note: every other column in the aggregated samples tables is itself an
// aggregation, so "last" serves as the value column for those tables.
func ValueColumnForSamplesTable(tableName string) string {
if tableName == SamplesV4Agg5mTableName || tableName == SamplesV4Agg30mTableName {
return "last"
}
return "value"
}
// start and end are in milliseconds
// we have three tables for samples
// 1. distributed_samples_v4


@@ -49,7 +49,7 @@ type HostFilter struct {
FilterByStatus HostStatus `json:"filterByStatus"`
}
// Validate ensures PostableHosts contains acceptable values.
// Validate ensures HostsListRequest contains acceptable values.
func (req *PostableHosts) Validate() error {
if req == nil {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")


@@ -1,103 +0,0 @@
package inframonitoringtypes
import (
"encoding/json"
"slices"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)
type Nodes struct {
Type ResponseType `json:"type" required:"true"`
Records []NodeRecord `json:"records" required:"true"`
Total int `json:"total" required:"true"`
RequiredMetricsCheck RequiredMetricsCheck `json:"requiredMetricsCheck" required:"true"`
EndTimeBeforeRetention bool `json:"endTimeBeforeRetention" required:"true"`
Warning *qbtypes.QueryWarnData `json:"warning,omitempty"`
}
type NodeRecord struct {
NodeName string `json:"nodeName" required:"true"`
Condition NodeCondition `json:"condition" required:"true"`
ReadyNodesCount int `json:"readyNodesCount" required:"true"`
NotReadyNodesCount int `json:"notReadyNodesCount" required:"true"`
NodeCPU float64 `json:"nodeCPU" required:"true"`
NodeCPUAllocatable float64 `json:"nodeCPUAllocatable" required:"true"`
NodeMemory float64 `json:"nodeMemory" required:"true"`
NodeMemoryAllocatable float64 `json:"nodeMemoryAllocatable" required:"true"`
Meta map[string]interface{} `json:"meta" required:"true"`
}
// PostableNodes is the request body for the v2 nodes list API.
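// A minimal body might look like this (illustrative; epoch-millisecond
// timestamps, and the filter/groupBy/orderBy JSON shapes are simplified):
//
//	{
//	  "start": 1717200000000,
//	  "end": 1717203600000,
//	  "filter": {"expression": "k8s.cluster.name = 'prod'"},
//	  "groupBy": [{"name": "k8s.cluster.name"}],
//	  "orderBy": {"key": {"name": "cpu"}, "direction": "desc"},
//	  "limit": 50
//	}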
type PostableNodes struct {
Start int64 `json:"start" required:"true"`
End int64 `json:"end" required:"true"`
Filter *qbtypes.Filter `json:"filter"`
GroupBy []qbtypes.GroupByKey `json:"groupBy"`
OrderBy *qbtypes.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit" required:"true"`
}
// Validate ensures PostableNodes contains acceptable values.
func (req *PostableNodes) Validate() error {
if req == nil {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
}
if req.Start <= 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid start time %d: start must be greater than 0",
req.Start,
)
}
if req.End <= 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid end time %d: end must be greater than 0",
req.End,
)
}
if req.Start >= req.End {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid time range: start (%d) must be less than end (%d)",
req.Start,
req.End,
)
}
if req.Limit < 1 || req.Limit > 5000 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be between 1 and 5000")
}
if req.Offset < 0 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "offset cannot be negative")
}
if req.OrderBy != nil {
if !slices.Contains(NodesValidOrderByKeys, req.OrderBy.Key.Name) {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by key: %s", req.OrderBy.Key.Name)
}
if req.OrderBy.Direction != qbtypes.OrderDirectionAsc && req.OrderBy.Direction != qbtypes.OrderDirectionDesc {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by direction: %s", req.OrderBy.Direction)
}
}
return nil
}
// UnmarshalJSON validates input immediately after decoding.
func (req *PostableNodes) UnmarshalJSON(data []byte) error {
type raw PostableNodes
var decoded raw
if err := json.Unmarshal(data, &decoded); err != nil {
return err
}
*req = PostableNodes(decoded)
return req.Validate()
}


@@ -1,42 +0,0 @@
package inframonitoringtypes
import "github.com/SigNoz/signoz/pkg/valuer"
type NodeCondition struct {
valuer.String
}
var (
NodeConditionReady = NodeCondition{valuer.NewString("ready")}
NodeConditionNotReady = NodeCondition{valuer.NewString("not_ready")}
NodeConditionNone = NodeCondition{valuer.NewString("")}
)
func (NodeCondition) Enum() []any {
return []any{
NodeConditionReady,
NodeConditionNotReady,
NodeConditionNone,
}
}
// Numeric values emitted by the k8s.node.condition_ready metric
// (source: OTel kubeletstats receiver).
const (
NodeConditionNumReady = 1
NodeConditionNumNotReady = 0
)
const (
NodesOrderByCPU = "cpu"
NodesOrderByCPUAllocatable = "cpu_allocatable"
NodesOrderByMemory = "memory"
NodesOrderByMemoryAllocatable = "memory_allocatable"
)
var NodesValidOrderByKeys = []string{
NodesOrderByCPU,
NodesOrderByCPUAllocatable,
NodesOrderByMemory,
NodesOrderByMemoryAllocatable,
}


@@ -1,255 +0,0 @@
package inframonitoringtypes
import (
"testing"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/stretchr/testify/require"
)
func TestPostableNodes_Validate(t *testing.T) {
tests := []struct {
name string
req *PostableNodes
wantErr bool
}{
{
name: "valid request",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "nil request",
req: nil,
wantErr: true,
},
{
name: "start time zero",
req: &PostableNodes{
Start: 0,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time negative",
req: &PostableNodes{
Start: -1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "end time zero",
req: &PostableNodes{
Start: 1000,
End: 0,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time greater than end time",
req: &PostableNodes{
Start: 2000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time equal to end time",
req: &PostableNodes{
Start: 1000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "limit zero",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: 0,
Offset: 0,
},
wantErr: true,
},
{
name: "limit negative",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: -10,
Offset: 0,
},
wantErr: true,
},
{
name: "limit exceeds max",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: 5001,
Offset: 0,
},
wantErr: true,
},
{
name: "offset negative",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: 100,
Offset: -5,
},
wantErr: true,
},
{
name: "orderBy nil is valid",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "orderBy with valid key cpu and direction asc",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: NodesOrderByCPU,
},
},
Direction: qbtypes.OrderDirectionAsc,
},
},
wantErr: false,
},
{
name: "orderBy with valid key cpu_allocatable and direction desc",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: NodesOrderByCPUAllocatable,
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: false,
},
{
name: "orderBy with valid key memory_allocatable and direction asc",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: NodesOrderByMemoryAllocatable,
},
},
Direction: qbtypes.OrderDirectionAsc,
},
},
wantErr: false,
},
{
name: "orderBy with condition key is rejected",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "condition",
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: true,
},
{
name: "orderBy with invalid key",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "unknown",
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: true,
},
{
name: "orderBy with valid key but invalid direction",
req: &PostableNodes{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: NodesOrderByMemory,
},
},
Direction: qbtypes.OrderDirection{String: valuer.NewString("invalid")},
},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.req.Validate()
if tt.wantErr {
require.Error(t, err)
require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
} else {
require.NoError(t, err)
}
})
}
}


@@ -1,109 +0,0 @@
package inframonitoringtypes
import (
"encoding/json"
"slices"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)
type Pods struct {
Type ResponseType `json:"type" required:"true"`
Records []PodRecord `json:"records" required:"true"`
Total int `json:"total" required:"true"`
RequiredMetricsCheck RequiredMetricsCheck `json:"requiredMetricsCheck" required:"true"`
EndTimeBeforeRetention bool `json:"endTimeBeforeRetention" required:"true"`
Warning *qbtypes.QueryWarnData `json:"warning,omitempty"`
}
type PodRecord struct {
PodUID string `json:"podUID" required:"true"`
PodCPU float64 `json:"podCPU" required:"true"`
PodCPURequest float64 `json:"podCPURequest" required:"true"`
PodCPULimit float64 `json:"podCPULimit" required:"true"`
PodMemory float64 `json:"podMemory" required:"true"`
PodMemoryRequest float64 `json:"podMemoryRequest" required:"true"`
PodMemoryLimit float64 `json:"podMemoryLimit" required:"true"`
PodPhase PodPhase `json:"podPhase" required:"true"`
PendingPodCount int `json:"pendingPodCount" required:"true"`
RunningPodCount int `json:"runningPodCount" required:"true"`
SucceededPodCount int `json:"succeededPodCount" required:"true"`
FailedPodCount int `json:"failedPodCount" required:"true"`
UnknownPodCount int `json:"unknownPodCount" required:"true"`
PodAge int64 `json:"podAge" required:"true"`
Meta map[string]interface{} `json:"meta" required:"true"`
}
// PostablePods is the request body for the v2 pods list API.
type PostablePods struct {
Start int64 `json:"start" required:"true"`
End int64 `json:"end" required:"true"`
Filter *qbtypes.Filter `json:"filter"`
GroupBy []qbtypes.GroupByKey `json:"groupBy"`
OrderBy *qbtypes.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit" required:"true"`
}
// Validate ensures PostablePods contains acceptable values.
func (req *PostablePods) Validate() error {
if req == nil {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
}
if req.Start <= 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid start time %d: start must be greater than 0",
req.Start,
)
}
if req.End <= 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid end time %d: end must be greater than 0",
req.End,
)
}
if req.Start >= req.End {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid time range: start (%d) must be less than end (%d)",
req.Start,
req.End,
)
}
if req.Limit < 1 || req.Limit > 5000 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be between 1 and 5000")
}
if req.Offset < 0 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "offset cannot be negative")
}
if req.OrderBy != nil {
if !slices.Contains(PodsValidOrderByKeys, req.OrderBy.Key.Name) {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by key: %s", req.OrderBy.Key.Name)
}
if req.OrderBy.Direction != qbtypes.OrderDirectionAsc && req.OrderBy.Direction != qbtypes.OrderDirectionDesc {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by direction: %s", req.OrderBy.Direction)
}
}
return nil
}
// UnmarshalJSON validates input immediately after decoding.
func (req *PostablePods) UnmarshalJSON(data []byte) error {
type raw PostablePods
var decoded raw
if err := json.Unmarshal(data, &decoded); err != nil {
return err
}
*req = PostablePods(decoded)
return req.Validate()
}


@@ -1,55 +0,0 @@
package inframonitoringtypes
import "github.com/SigNoz/signoz/pkg/valuer"
type PodPhase struct {
valuer.String
}
var (
PodPhasePending = PodPhase{valuer.NewString("pending")}
PodPhaseRunning = PodPhase{valuer.NewString("running")}
PodPhaseSucceeded = PodPhase{valuer.NewString("succeeded")}
PodPhaseFailed = PodPhase{valuer.NewString("failed")}
PodPhaseUnknown = PodPhase{valuer.NewString("unknown")}
PodPhaseNone = PodPhase{valuer.NewString("")}
)
func (PodPhase) Enum() []any {
return []any{
PodPhasePending,
PodPhaseRunning,
PodPhaseSucceeded,
PodPhaseFailed,
PodPhaseUnknown,
PodPhaseNone,
}
}
// Numeric pod phase values emitted by the k8s.pod.phase metric
// (source: OTel kubeletstats receiver).
const (
PodPhaseNumPending = 1
PodPhaseNumRunning = 2
PodPhaseNumSucceeded = 3
PodPhaseNumFailed = 4
PodPhaseNumUnknown = 5
)
const (
PodsOrderByCPU = "cpu"
PodsOrderByCPURequest = "cpu_request"
PodsOrderByCPULimit = "cpu_limit"
PodsOrderByMemory = "memory"
PodsOrderByMemoryRequest = "memory_request"
PodsOrderByMemoryLimit = "memory_limit"
)
var PodsValidOrderByKeys = []string{
PodsOrderByCPU,
PodsOrderByCPURequest,
PodsOrderByCPULimit,
PodsOrderByMemory,
PodsOrderByMemoryRequest,
PodsOrderByMemoryLimit,
}


@@ -1,219 +0,0 @@
package inframonitoringtypes
import (
"testing"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/stretchr/testify/require"
)
func TestPostablePods_Validate(t *testing.T) {
tests := []struct {
name string
req *PostablePods
wantErr bool
}{
{
name: "valid request",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "nil request",
req: nil,
wantErr: true,
},
{
name: "start time zero",
req: &PostablePods{
Start: 0,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time negative",
req: &PostablePods{
Start: -1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "end time zero",
req: &PostablePods{
Start: 1000,
End: 0,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time greater than end time",
req: &PostablePods{
Start: 2000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time equal to end time",
req: &PostablePods{
Start: 1000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "limit zero",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 0,
Offset: 0,
},
wantErr: true,
},
{
name: "limit negative",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: -10,
Offset: 0,
},
wantErr: true,
},
{
name: "limit exceeds max",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 5001,
Offset: 0,
},
wantErr: true,
},
{
name: "offset negative",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: -5,
},
wantErr: true,
},
{
name: "orderBy nil is valid",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "orderBy with valid key cpu and direction asc",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: PodsOrderByCPU,
},
},
Direction: qbtypes.OrderDirectionAsc,
},
},
wantErr: false,
},
{
name: "orderBy with phase key is rejected",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "phase",
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: true,
},
{
name: "orderBy with invalid key",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "unknown",
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: true,
},
{
name: "orderBy with valid key but invalid direction",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: PodsOrderByMemory,
},
},
Direction: qbtypes.OrderDirection{String: valuer.NewString("invalid")},
},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.req.Validate()
if tt.wantErr {
require.Error(t, err)
require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
} else {
require.NoError(t, err)
}
})
}
}


@@ -11,9 +11,7 @@ import (
"github.com/uptrace/bun"
)
var (
ErrCodeInvalidPlannedMaintenancePayload = errors.MustNewCode("invalid_planned_maintenance_payload")
)
var ErrCodeInvalidPlannedMaintenancePayload = errors.MustNewCode("invalid_planned_maintenance_payload")
type MaintenanceStatus struct {
valuer.String
@@ -63,15 +61,15 @@ type StorablePlannedMaintenance struct {
}
type PlannedMaintenance struct {
ID valuer.UUID `json:"id" required:"true"`
Name string `json:"name" required:"true"`
Description string `json:"description"`
Schedule *Schedule `json:"schedule" required:"true"`
RuleIDs []string `json:"alertIds"`
CreatedAt time.Time `json:"createdAt"`
CreatedBy string `json:"createdBy"`
UpdatedAt time.Time `json:"updatedAt"`
UpdatedBy string `json:"updatedBy"`
ID valuer.UUID `json:"id" required:"true"`
Name string `json:"name" required:"true"`
Description string `json:"description"`
Schedule *Schedule `json:"schedule" required:"true"`
RuleIDs []string `json:"alertIds"`
CreatedAt time.Time `json:"createdAt"`
CreatedBy string `json:"createdBy"`
UpdatedAt time.Time `json:"updatedAt"`
UpdatedBy string `json:"updatedBy"`
Status MaintenanceStatus `json:"status" required:"true"`
Kind MaintenanceKind `json:"kind" required:"true"`
}
@@ -161,8 +159,11 @@ func (m *PlannedMaintenance) ShouldSkip(ruleID string, now time.Time) bool {
currentTime := now.In(loc)
// fixed schedule
if !m.Schedule.StartTime.IsZero() && !m.Schedule.EndTime.IsZero() {
// fixed schedule — only when no recurrence is configured.
// When recurrence is set, the recurring check below handles everything;
// falling through here would cause the window to match the absolute
// StartTime/EndTime range instead of the daily/weekly/monthly pattern.
if m.Schedule.Recurrence == nil && !m.Schedule.StartTime.IsZero() && !m.Schedule.EndTime.IsZero() {
startTime := m.Schedule.StartTime.In(loc)
endTime := m.Schedule.EndTime.In(loc)
if currentTime.Equal(startTime) || currentTime.Equal(endTime) ||
@@ -333,6 +334,11 @@ func (m *PlannedMaintenance) Validate() error {
}
if m.Schedule.Recurrence != nil {
// Fixed range and recurrence are mutually exclusive.
if !m.Schedule.StartTime.IsZero() && !m.Schedule.EndTime.IsZero() {
return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidPlannedMaintenancePayload,
"cannot set both a fixed time range (startTime/endTime) and a recurrence; use one or the other")
}
if m.Schedule.Recurrence.RepeatType.IsZero() {
return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidPlannedMaintenancePayload, "missing repeat type in the payload")
}


@@ -1,6 +1,7 @@
package ruletypes
import (
"strings"
"testing"
"time"
@@ -13,7 +14,6 @@ func timePtr(t time.Time) *time.Time {
}
func TestShouldSkipMaintenance(t *testing.T) {
cases := []struct {
name string
maintenance *PlannedMaintenance
@@ -499,7 +499,7 @@ func TestShouldSkipMaintenance(t *testing.T) {
},
},
},
ts: time.Date(2024, 04, 1, 12, 10, 0, 0, time.UTC),
ts: time.Date(2024, 4, 1, 12, 10, 0, 0, time.UTC),
skip: true,
},
{
@@ -508,14 +508,14 @@ func TestShouldSkipMaintenance(t *testing.T) {
Schedule: &Schedule{
Timezone: "UTC",
Recurrence: &Recurrence{
StartTime: time.Date(2024, 04, 01, 12, 0, 0, 0, time.UTC),
StartTime: time.Date(2024, 4, 1, 12, 0, 0, 0, time.UTC),
Duration: valuer.MustParseTextDuration("2h"),
RepeatType: RepeatTypeWeekly,
RepeatOn: []RepeatOn{RepeatOnMonday},
},
},
},
ts: time.Date(2024, 04, 15, 12, 10, 0, 0, time.UTC),
ts: time.Date(2024, 4, 15, 12, 10, 0, 0, time.UTC),
skip: true,
},
{
@@ -524,14 +524,14 @@ func TestShouldSkipMaintenance(t *testing.T) {
Schedule: &Schedule{
Timezone: "UTC",
Recurrence: &Recurrence{
StartTime: time.Date(2024, 04, 01, 12, 0, 0, 0, time.UTC),
StartTime: time.Date(2024, 4, 1, 12, 0, 0, 0, time.UTC),
Duration: valuer.MustParseTextDuration("2h"),
RepeatType: RepeatTypeWeekly,
RepeatOn: []RepeatOn{RepeatOnMonday},
},
},
},
ts: time.Date(2024, 04, 14, 12, 10, 0, 0, time.UTC), // 14th 04 is sunday
ts: time.Date(2024, 4, 14, 12, 10, 0, 0, time.UTC), // April 14 is a Sunday
skip: false,
},
{
@@ -540,14 +540,14 @@ func TestShouldSkipMaintenance(t *testing.T) {
Schedule: &Schedule{
Timezone: "UTC",
Recurrence: &Recurrence{
StartTime: time.Date(2024, 04, 01, 12, 0, 0, 0, time.UTC),
StartTime: time.Date(2024, 4, 1, 12, 0, 0, 0, time.UTC),
Duration: valuer.MustParseTextDuration("2h"),
RepeatType: RepeatTypeWeekly,
RepeatOn: []RepeatOn{RepeatOnMonday},
},
},
},
ts: time.Date(2024, 04, 16, 12, 10, 0, 0, time.UTC), // 16th 04 is tuesday
ts: time.Date(2024, 4, 16, 12, 10, 0, 0, time.UTC), // April 16 is a Tuesday
skip: false,
},
{
@@ -556,14 +556,14 @@ func TestShouldSkipMaintenance(t *testing.T) {
Schedule: &Schedule{
Timezone: "UTC",
Recurrence: &Recurrence{
StartTime: time.Date(2024, 04, 01, 12, 0, 0, 0, time.UTC),
StartTime: time.Date(2024, 4, 1, 12, 0, 0, 0, time.UTC),
Duration: valuer.MustParseTextDuration("2h"),
RepeatType: RepeatTypeWeekly,
RepeatOn: []RepeatOn{RepeatOnMonday},
},
},
},
ts: time.Date(2024, 05, 06, 12, 10, 0, 0, time.UTC),
ts: time.Date(2024, 5, 6, 12, 10, 0, 0, time.UTC),
skip: true,
},
{
@@ -572,14 +572,14 @@ func TestShouldSkipMaintenance(t *testing.T) {
Schedule: &Schedule{
Timezone: "UTC",
Recurrence: &Recurrence{
StartTime: time.Date(2024, 04, 01, 12, 0, 0, 0, time.UTC),
StartTime: time.Date(2024, 4, 1, 12, 0, 0, 0, time.UTC),
Duration: valuer.MustParseTextDuration("2h"),
RepeatType: RepeatTypeWeekly,
RepeatOn: []RepeatOn{RepeatOnMonday},
},
},
},
ts: time.Date(2024, 05, 06, 14, 00, 0, 0, time.UTC),
ts: time.Date(2024, 5, 6, 14, 0, 0, 0, time.UTC),
skip: true,
},
{
@@ -588,13 +588,13 @@ func TestShouldSkipMaintenance(t *testing.T) {
Schedule: &Schedule{
Timezone: "UTC",
Recurrence: &Recurrence{
StartTime: time.Date(2024, 04, 04, 12, 0, 0, 0, time.UTC),
StartTime: time.Date(2024, 4, 4, 12, 0, 0, 0, time.UTC),
Duration: valuer.MustParseTextDuration("2h"),
RepeatType: RepeatTypeMonthly,
},
},
},
ts: time.Date(2024, 04, 04, 12, 10, 0, 0, time.UTC),
ts: time.Date(2024, 4, 4, 12, 10, 0, 0, time.UTC),
skip: true,
},
{
@@ -603,13 +603,13 @@ func TestShouldSkipMaintenance(t *testing.T) {
Schedule: &Schedule{
Timezone: "UTC",
Recurrence: &Recurrence{
StartTime: time.Date(2024, 04, 04, 12, 0, 0, 0, time.UTC),
StartTime: time.Date(2024, 4, 4, 12, 0, 0, 0, time.UTC),
Duration: valuer.MustParseTextDuration("2h"),
RepeatType: RepeatTypeMonthly,
},
},
},
ts: time.Date(2024, 04, 04, 14, 10, 0, 0, time.UTC),
ts: time.Date(2024, 4, 4, 14, 10, 0, 0, time.UTC),
skip: false,
},
{
@@ -618,13 +618,53 @@ func TestShouldSkipMaintenance(t *testing.T) {
Schedule: &Schedule{
Timezone: "UTC",
Recurrence: &Recurrence{
StartTime: time.Date(2024, 04, 04, 12, 0, 0, 0, time.UTC),
StartTime: time.Date(2024, 4, 4, 12, 0, 0, 0, time.UTC),
Duration: valuer.MustParseTextDuration("2h"),
RepeatType: RepeatTypeMonthly,
},
},
},
ts: time.Date(2024, 05, 04, 12, 10, 0, 0, time.UTC),
ts: time.Date(2024, 5, 4, 12, 10, 0, 0, time.UTC),
skip: true,
},
// #7548: recurring daily with Schedule.StartTime/EndTime also set.
// The recurrence should govern — not the fixed range.
{
name: "recurring-daily-with-fixed-times-outside-daily-window",
maintenance: &PlannedMaintenance{
Schedule: &Schedule{
Timezone: "UTC",
// These fixed fields should be ignored when Recurrence is set.
StartTime: time.Date(2024, 6, 1, 10, 0, 0, 0, time.UTC),
EndTime: time.Date(2024, 6, 30, 18, 0, 0, 0, time.UTC),
Recurrence: &Recurrence{
StartTime: time.Date(2024, 6, 1, 14, 0, 0, 0, time.UTC), // daily at 14:00
Duration: valuer.MustParseTextDuration("2h"), // until 16:00
RepeatType: RepeatTypeDaily,
},
},
},
// 11:00 is inside the fixed range but outside the daily 14:00-16:00 window.
// Before the fix this returned true (the bug); after the fix it returns false.
ts: time.Date(2024, 6, 15, 11, 0, 0, 0, time.UTC),
skip: false,
},
{
name: "recurring-daily-with-fixed-times-inside-daily-window",
maintenance: &PlannedMaintenance{
Schedule: &Schedule{
Timezone: "UTC",
StartTime: time.Date(2024, 6, 1, 10, 0, 0, 0, time.UTC),
EndTime: time.Date(2024, 6, 30, 18, 0, 0, 0, time.UTC),
Recurrence: &Recurrence{
StartTime: time.Date(2024, 6, 1, 14, 0, 0, 0, time.UTC),
Duration: valuer.MustParseTextDuration("2h"),
RepeatType: RepeatTypeDaily,
},
},
},
// 15:00 is inside the daily 14:00-16:00 window — should skip.
ts: time.Date(2024, 6, 15, 15, 0, 0, 0, time.UTC),
skip: true,
},
}
@@ -636,3 +676,27 @@ func TestShouldSkipMaintenance(t *testing.T) {
}
}
}
// #7548: Validate rejects payloads that set both fixed range and recurrence.
func TestValidate_FixedAndRecurrenceMutuallyExclusive(t *testing.T) {
m := PlannedMaintenance{
Name: "test",
Schedule: &Schedule{
Timezone: "UTC",
StartTime: time.Now().Add(-time.Hour),
EndTime: time.Now().Add(time.Hour),
Recurrence: &Recurrence{
StartTime: time.Now().Add(-time.Hour),
Duration: valuer.MustParseTextDuration("2h"),
RepeatType: RepeatTypeDaily,
},
},
}
err := m.Validate()
if err == nil {
t.Fatal("expected validation error when both fixed range and recurrence are set")
}
if !strings.Contains(err.Error(), "cannot set both") {
t.Errorf("unexpected error message: %v", err)
}
}