mirror of https://github.com/SigNoz/signoz.git
synced 2026-04-28 14:40:32 +01:00

Compare commits
3 commits: nv/v2-dash ... fix/recurr
| Author | SHA1 | Date |
|---|---|---|
|  | d0e748ed37 |  |
|  | ffd493617f |  |
|  | 3f95d35ad5 |  |
56 .github/CODEOWNERS (vendored)
@@ -52,49 +52,49 @@ go.mod @therealpandey

# Querier Owners

-/pkg/querier/ @srikanthccv
-/pkg/variables/ @srikanthccv
-/pkg/types/querybuildertypes/ @srikanthccv
-/pkg/types/telemetrytypes/ @srikanthccv
-/pkg/querybuilder/ @srikanthccv
-/pkg/telemetrylogs/ @srikanthccv
-/pkg/telemetrymetadata/ @srikanthccv
-/pkg/telemetrymetrics/ @srikanthccv
-/pkg/telemetrytraces/ @srikanthccv
+/pkg/querier/ @srikanthccv @therealpandey
+/pkg/variables/ @srikanthccv @therealpandey
+/pkg/types/querybuildertypes/ @srikanthccv @therealpandey
+/pkg/types/telemetrytypes/ @srikanthccv @therealpandey
+/pkg/querybuilder/ @srikanthccv @therealpandey
+/pkg/telemetrylogs/ @srikanthccv @therealpandey
+/pkg/telemetrymetadata/ @srikanthccv @therealpandey
+/pkg/telemetrymetrics/ @srikanthccv @therealpandey
+/pkg/telemetrytraces/ @srikanthccv @therealpandey

# Metrics

-/pkg/types/metrictypes/ @srikanthccv
-/pkg/types/metricsexplorertypes/ @srikanthccv
-/pkg/modules/metricsexplorer/ @srikanthccv
-/pkg/prometheus/ @srikanthccv
+/pkg/types/metrictypes/ @srikanthccv @therealpandey
+/pkg/types/metricsexplorertypes/ @srikanthccv @therealpandey
+/pkg/modules/metricsexplorer/ @srikanthccv @therealpandey
+/pkg/prometheus/ @srikanthccv @therealpandey

# APM

-/pkg/types/servicetypes/ @srikanthccv
-/pkg/types/apdextypes/ @srikanthccv
-/pkg/modules/apdex/ @srikanthccv
-/pkg/modules/services/ @srikanthccv
+/pkg/types/servicetypes/ @srikanthccv @therealpandey
+/pkg/types/apdextypes/ @srikanthccv @therealpandey
+/pkg/modules/apdex/ @srikanthccv @therealpandey
+/pkg/modules/services/ @srikanthccv @therealpandey

# Dashboard

-/pkg/types/dashboardtypes/ @srikanthccv
-/pkg/modules/dashboard/ @srikanthccv
+/pkg/types/dashboardtypes/ @srikanthccv @therealpandey
+/pkg/modules/dashboard/ @srikanthccv @therealpandey

# Rule/Alertmanager

-/pkg/types/ruletypes/ @srikanthccv
-/pkg/types/alertmanagertypes @srikanthccv
-/pkg/alertmanager/ @srikanthccv
-/pkg/ruler/ @srikanthccv
-/pkg/modules/rulestatehistory/ @srikanthccv
-/pkg/types/rulestatehistorytypes/ @srikanthccv
+/pkg/types/ruletypes/ @srikanthccv @therealpandey
+/pkg/types/alertmanagertypes @srikanthccv @therealpandey
+/pkg/alertmanager/ @srikanthccv @therealpandey
+/pkg/ruler/ @srikanthccv @therealpandey
+/pkg/modules/rulestatehistory/ @srikanthccv @therealpandey
+/pkg/types/rulestatehistorytypes/ @srikanthccv @therealpandey

# Correlation-adjacent

-/pkg/contextlinks/ @srikanthccv
-/pkg/types/parsertypes/ @srikanthccv
-/pkg/queryparser/ @srikanthccv
+/pkg/contextlinks/ @srikanthccv @therealpandey
+/pkg/types/parsertypes/ @srikanthccv @therealpandey
+/pkg/queryparser/ @srikanthccv @therealpandey

# AuthN / AuthZ Owners
@@ -2474,6 +2474,97 @@ components:
      - requiredMetricsCheck
      - endTimeBeforeRetention
      type: object
    InframonitoringtypesPodPhase:
      enum:
      - pending
      - running
      - succeeded
      - failed
      - unknown
      - ""
      type: string
    InframonitoringtypesPodRecord:
      properties:
        failedPodCount:
          type: integer
        meta:
          additionalProperties: {}
          nullable: true
          type: object
        pendingPodCount:
          type: integer
        podAge:
          format: int64
          type: integer
        podCPU:
          format: double
          type: number
        podCPULimit:
          format: double
          type: number
        podCPURequest:
          format: double
          type: number
        podMemory:
          format: double
          type: number
        podMemoryLimit:
          format: double
          type: number
        podMemoryRequest:
          format: double
          type: number
        podPhase:
          $ref: '#/components/schemas/InframonitoringtypesPodPhase'
        podUID:
          type: string
        runningPodCount:
          type: integer
        succeededPodCount:
          type: integer
        unknownPodCount:
          type: integer
      required:
      - podUID
      - podCPU
      - podCPURequest
      - podCPULimit
      - podMemory
      - podMemoryRequest
      - podMemoryLimit
      - podPhase
      - pendingPodCount
      - runningPodCount
      - succeededPodCount
      - failedPodCount
      - unknownPodCount
      - podAge
      - meta
      type: object
    InframonitoringtypesPods:
      properties:
        endTimeBeforeRetention:
          type: boolean
        records:
          items:
            $ref: '#/components/schemas/InframonitoringtypesPodRecord'
          nullable: true
          type: array
        requiredMetricsCheck:
          $ref: '#/components/schemas/InframonitoringtypesRequiredMetricsCheck'
        total:
          type: integer
        type:
          $ref: '#/components/schemas/InframonitoringtypesResponseType'
        warning:
          $ref: '#/components/schemas/Querybuildertypesv5QueryWarnData'
      required:
      - type
      - records
      - total
      - requiredMetricsCheck
      - endTimeBeforeRetention
      type: object
    InframonitoringtypesPostableHosts:
      properties:
        end:
@@ -2500,6 +2591,32 @@ components:
      - end
      - limit
      type: object
    InframonitoringtypesPostablePods:
      properties:
        end:
          format: int64
          type: integer
        filter:
          $ref: '#/components/schemas/Querybuildertypesv5Filter'
        groupBy:
          items:
            $ref: '#/components/schemas/Querybuildertypesv5GroupByKey'
          nullable: true
          type: array
        limit:
          type: integer
        offset:
          type: integer
        orderBy:
          $ref: '#/components/schemas/Querybuildertypesv5OrderBy'
        start:
          format: int64
          type: integer
      required:
      - start
      - end
      - limit
      type: object
    InframonitoringtypesRequiredMetricsCheck:
      properties:
        missingMetrics:
@@ -10886,7 +11003,9 @@ paths:
        five metrics, and pagination via offset/limit. The response type is ''list''
        for the default host.name grouping or ''grouped_list'' for custom groupBy
        keys. Also reports missing required metrics and whether the requested time
-        range falls before the data retention boundary.'
+        range falls before the data retention boundary. Numeric metric fields (cpu,
+        memory, wait, load15, diskUsage) return -1 as a sentinel when no data is available
+        for that field.'
      operationId: ListHosts
      requestBody:
        content:
@@ -10940,6 +11059,79 @@ paths:
      summary: List Hosts for Infra Monitoring
      tags:
      - inframonitoring
  /api/v2/infra_monitoring/pods:
    post:
      deprecated: false
      description: 'Returns a paginated list of Kubernetes pods with key metrics:
        CPU usage, CPU request/limit utilization, memory working set, memory request/limit
        utilization, current pod phase (pending/running/succeeded/failed/unknown),
        and pod age (ms since start time). Each pod includes metadata attributes (namespace,
        node, workload owner such as deployment/statefulset/daemonset/job/cronjob,
        cluster). Supports filtering via a filter expression, custom groupBy to aggregate
        pods by any attribute, ordering by any of the six metrics (cpu, cpu_request,
        cpu_limit, memory, memory_request, memory_limit), and pagination via offset/limit.
        The response type is ''list'' for the default k8s.pod.uid grouping (each row
        is one pod with its current phase) or ''grouped_list'' for custom groupBy
        keys (each row aggregates pods in the group with per-phase counts: pendingPodCount,
        runningPodCount, succeededPodCount, failedPodCount, unknownPodCount derived
        from each pod''s latest phase in the window). Also reports missing required
        metrics and whether the requested time range falls before the data retention
        boundary. Numeric metric fields (podCPU, podCPURequest, podCPULimit, podMemory,
        podMemoryRequest, podMemoryLimit, podAge) return -1 as a sentinel when no
        data is available for that field.'
      operationId: ListPods
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/InframonitoringtypesPostablePods'
      responses:
        "200":
          content:
            application/json:
              schema:
                properties:
                  data:
                    $ref: '#/components/schemas/InframonitoringtypesPods'
                  status:
                    type: string
                required:
                - status
                - data
                type: object
          description: OK
        "400":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Bad Request
        "401":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Unauthorized
        "403":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Forbidden
        "500":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Internal Server Error
      security:
      - api_key:
        - VIEWER
      - tokenizer:
        - VIEWER
      summary: List Pods for Infra Monitoring
      tags:
      - inframonitoring
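For illustration, a minimal Go sketch of calling this new endpoint. The instance URL and the SIGNOZ-API-KEY auth header are assumptions, not part of this diff; the body fields follow the InframonitoringtypesPostablePods schema above (start/end are unix milliseconds).

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "net/http"
    "time"
)

func main() {
    end := time.Now().UnixMilli()
    // Request the last 30 minutes of pod data; orderBy defaults to cpu desc,
    // groupBy defaults to k8s.pod.uid (list mode) when omitted.
    body, _ := json.Marshal(map[string]any{
        "start":  end - 30*60*1000,
        "end":    end,
        "limit":  10,
        "offset": 0,
        "filter": map[string]string{"expression": "k8s.namespace.name = 'default'"},
    })

    req, _ := http.NewRequest(http.MethodPost,
        "http://localhost:8080/api/v2/infra_monitoring/pods", bytes.NewReader(body)) // assumed instance URL
    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("SIGNOZ-API-KEY", "<api-key>") // assumed auth header

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println("status:", resp.Status) // 200 with a {status, data} envelope on success
}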
  /api/v2/livez:
    get:
      deprecated: false
@@ -13,7 +13,9 @@ import type {

import type {
  InframonitoringtypesPostableHostsDTO,
  InframonitoringtypesPostablePodsDTO,
  ListHosts200,
  ListPods200,
  RenderErrorResponseDTO,
} from '../sigNoz.schemas';
@@ -21,7 +23,7 @@ import { GeneratedAPIInstance } from '../../../generatedAPIInstance';

import type { ErrorType, BodyType } from '../../../generatedAPIInstance';

/**
- * Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary.
+ * Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (cpu, memory, wait, load15, diskUsage) return -1 as a sentinel when no data is available for that field.
 * @summary List Hosts for Infra Monitoring
 */
export const listHosts = (
@@ -104,3 +106,87 @@ export const useListHosts = <

  return useMutation(mutationOptions);
};

/**
 * Returns a paginated list of Kubernetes pods with key metrics: CPU usage, CPU request/limit utilization, memory working set, memory request/limit utilization, current pod phase (pending/running/succeeded/failed/unknown), and pod age (ms since start time). Each pod includes metadata attributes (namespace, node, workload owner such as deployment/statefulset/daemonset/job/cronjob, cluster). Supports filtering via a filter expression, custom groupBy to aggregate pods by any attribute, ordering by any of the six metrics (cpu, cpu_request, cpu_limit, memory, memory_request, memory_limit), and pagination via offset/limit. The response type is 'list' for the default k8s.pod.uid grouping (each row is one pod with its current phase) or 'grouped_list' for custom groupBy keys (each row aggregates pods in the group with per-phase counts: pendingPodCount, runningPodCount, succeededPodCount, failedPodCount, unknownPodCount derived from each pod's latest phase in the window). Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (podCPU, podCPURequest, podCPULimit, podMemory, podMemoryRequest, podMemoryLimit, podAge) return -1 as a sentinel when no data is available for that field.
 * @summary List Pods for Infra Monitoring
 */
export const listPods = (
  inframonitoringtypesPostablePodsDTO: BodyType<InframonitoringtypesPostablePodsDTO>,
  signal?: AbortSignal,
) => {
  return GeneratedAPIInstance<ListPods200>({
    url: `/api/v2/infra_monitoring/pods`,
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    data: inframonitoringtypesPostablePodsDTO,
    signal,
  });
};

export const getListPodsMutationOptions = <
  TError = ErrorType<RenderErrorResponseDTO>,
  TContext = unknown,
>(options?: {
  mutation?: UseMutationOptions<
    Awaited<ReturnType<typeof listPods>>,
    TError,
    { data: BodyType<InframonitoringtypesPostablePodsDTO> },
    TContext
  >;
}): UseMutationOptions<
  Awaited<ReturnType<typeof listPods>>,
  TError,
  { data: BodyType<InframonitoringtypesPostablePodsDTO> },
  TContext
> => {
  const mutationKey = ['listPods'];
  const { mutation: mutationOptions } = options
    ? options.mutation &&
      'mutationKey' in options.mutation &&
      options.mutation.mutationKey
      ? options
      : { ...options, mutation: { ...options.mutation, mutationKey } }
    : { mutation: { mutationKey } };

  const mutationFn: MutationFunction<
    Awaited<ReturnType<typeof listPods>>,
    { data: BodyType<InframonitoringtypesPostablePodsDTO> }
  > = (props) => {
    const { data } = props ?? {};

    return listPods(data);
  };

  return { mutationFn, ...mutationOptions };
};

export type ListPodsMutationResult = NonNullable<
  Awaited<ReturnType<typeof listPods>>
>;
export type ListPodsMutationBody =
  BodyType<InframonitoringtypesPostablePodsDTO>;
export type ListPodsMutationError = ErrorType<RenderErrorResponseDTO>;

/**
 * @summary List Pods for Infra Monitoring
 */
export const useListPods = <
  TError = ErrorType<RenderErrorResponseDTO>,
  TContext = unknown,
>(options?: {
  mutation?: UseMutationOptions<
    Awaited<ReturnType<typeof listPods>>,
    TError,
    { data: BodyType<InframonitoringtypesPostablePodsDTO> },
    TContext
  >;
}): UseMutationResult<
  Awaited<ReturnType<typeof listPods>>,
  TError,
  { data: BodyType<InframonitoringtypesPostablePodsDTO> },
  TContext
> => {
  const mutationOptions = getListPodsMutationOptions(options);

  return useMutation(mutationOptions);
};
@@ -3243,6 +3243,108 @@ export interface InframonitoringtypesHostsDTO {
  warning?: Querybuildertypesv5QueryWarnDataDTO;
}

export enum InframonitoringtypesPodPhaseDTO {
  pending = 'pending',
  running = 'running',
  succeeded = 'succeeded',
  failed = 'failed',
  unknown = 'unknown',
  '' = '',
}
/**
 * @nullable
 */
export type InframonitoringtypesPodRecordDTOMeta = {
  [key: string]: unknown;
} | null;

export interface InframonitoringtypesPodRecordDTO {
  /**
   * @type integer
   */
  failedPodCount: number;
  /**
   * @type object
   * @nullable true
   */
  meta: InframonitoringtypesPodRecordDTOMeta;
  /**
   * @type integer
   */
  pendingPodCount: number;
  /**
   * @type integer
   * @format int64
   */
  podAge: number;
  /**
   * @type number
   * @format double
   */
  podCPU: number;
  /**
   * @type number
   * @format double
   */
  podCPULimit: number;
  /**
   * @type number
   * @format double
   */
  podCPURequest: number;
  /**
   * @type number
   * @format double
   */
  podMemory: number;
  /**
   * @type number
   * @format double
   */
  podMemoryLimit: number;
  /**
   * @type number
   * @format double
   */
  podMemoryRequest: number;
  podPhase: InframonitoringtypesPodPhaseDTO;
  /**
   * @type string
   */
  podUID: string;
  /**
   * @type integer
   */
  runningPodCount: number;
  /**
   * @type integer
   */
  succeededPodCount: number;
  /**
   * @type integer
   */
  unknownPodCount: number;
}

export interface InframonitoringtypesPodsDTO {
  /**
   * @type boolean
   */
  endTimeBeforeRetention: boolean;
  /**
   * @type array
   * @nullable true
   */
  records: InframonitoringtypesPodRecordDTO[] | null;
  requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
  /**
   * @type integer
   */
  total: number;
  type: InframonitoringtypesResponseTypeDTO;
  warning?: Querybuildertypesv5QueryWarnDataDTO;
}

export interface InframonitoringtypesPostableHostsDTO {
  /**
   * @type integer
@@ -3271,6 +3373,34 @@ export interface InframonitoringtypesPostableHostsDTO {
  start: number;
}

export interface InframonitoringtypesPostablePodsDTO {
  /**
   * @type integer
   * @format int64
   */
  end: number;
  filter?: Querybuildertypesv5FilterDTO;
  /**
   * @type array
   * @nullable true
   */
  groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
  /**
   * @type integer
   */
  limit: number;
  /**
   * @type integer
   */
  offset?: number;
  orderBy?: Querybuildertypesv5OrderByDTO;
  /**
   * @type integer
   * @format int64
   */
  start: number;
}

export interface InframonitoringtypesRequiredMetricsCheckDTO {
  /**
   * @type array
@@ -7350,6 +7480,14 @@ export type ListHosts200 = {
  status: string;
};

export type ListPods200 = {
  data: InframonitoringtypesPodsDTO;
  /**
   * @type string
   */
  status: string;
};

export type Livez200 = {
  data: FactoryResponseDTO;
  /**
@@ -139,8 +139,8 @@ function ChartPreview({
  if (startTime && endTime && startTime !== endTime) {
    dispatch(
      UpdateTimeInterval('custom', [
-        parseInt(getTimeString(startTime), 10),
-        parseInt(getTimeString(endTime), 10),
+        Number.parseInt(getTimeString(startTime), 10),
+        Number.parseInt(getTimeString(endTime), 10),
      ]),
    );
  }
@@ -370,7 +370,7 @@ function FormAlertRules({
  // onQueryCategoryChange handles changes to query category
  // in state as well as sets additional defaults
  const onQueryCategoryChange = (val: EQueryType): void => {
-    const element = document.getElementById('top');
+    const element = document.querySelector('#top');
    if (element) {
      element.scrollIntoView({ behavior: 'smooth' });
    }
@@ -279,7 +279,7 @@ function Explorer(): JSX.Element {
    [],
  );

-  const [warning, setWarning] = useState<Warning | undefined>(undefined);
+  const [warning, setWarning] = useState<Warning | undefined>();

  const oneChartPerQueryDisabledTooltip = useMemo(() => {
    if (splitedQueries.length <= 1) {
@@ -291,7 +291,7 @@ function Explorer(): JSX.Element {
    if (disableOneChartPerQuery) {
      return 'One chart per query cannot be disabled for multiple queries with different units.';
    }
-    return undefined;
+    return;
  }, [disableOneChartPerQuery, splitedQueries.length, units.length]);

  // Show the y axis unit selector if -
@@ -217,7 +217,7 @@ function Inspect({
    );
  }

-  if (!inspectMetricsTimeSeries.length) {
+  if (inspectMetricsTimeSeries.length === 0) {
    return renderFallback(
      'inspect-metrics-empty',
      <Empty description="No time series found for this metric to inspect." />,
@@ -254,10 +254,10 @@ export function useInspectMetrics(
    const valuesMap = new Map<number, number>();

    series.values.forEach(({ timestamp, value }) => {
-      valuesMap.set(timestamp, parseFloat(value));
+      valuesMap.set(timestamp, Number.parseFloat(value));
    });

-    return timestamps.map((timestamp) => valuesMap.get(timestamp) ?? NaN);
+    return timestamps.map((timestamp) => valuesMap.get(timestamp) ?? Number.NaN);
  });

  const rawData = [timestamps, ...timeseriesArray];
@@ -271,7 +271,7 @@ export function useInspectMetrics(
        labels.add(label);
      });
    });
-    return Array.from(labels);
+    return [...labels];
  }, [inspectMetricsData]);

  const reset = useCallback(() => {
@@ -115,8 +115,10 @@ export const useGetQueryRange: UseGetQueryRange = (

  const updatedQuery = updateBarStepInterval(
    requestData.query,
-    requestData.start ? requestData.start * 1e3 : parseInt(start, 10) * 1e3,
-    requestData.end ? requestData.end * 1e3 : parseInt(end, 10) * 1e3,
+    requestData.start
+      ? requestData.start * 1e3
+      : Number.parseInt(start, 10) * 1e3,
+    requestData.end ? requestData.end * 1e3 : Number.parseInt(end, 10) * 1e3,
  );

  return {
@@ -98,7 +98,7 @@ function LogsExplorer(): JSX.Element {
    setIsLoadingQueries(false);
  }, [queryClient]);

-  const [warning, setWarning] = useState<Warning | undefined>(undefined);
+  const [warning, setWarning] = useState<Warning | undefined>();

  const handleChangeSelectedView = useCallback(
    (view: ExplorerViews, querySearchParameters?: ICurrentQueryData): void => {
@@ -101,7 +101,7 @@ function TracesExplorer(): JSX.Element {
    getExplorerViewFromUrl(searchParams, panelTypesFromUrl),
  );

-  const [warning, setWarning] = useState<Warning | undefined>(undefined);
+  const [warning, setWarning] = useState<Warning | undefined>();
  const [isOpen, setOpen] = useState<boolean>(true);

  const defaultQuery = useMemo(
@@ -16,7 +16,7 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
            ID:      "ListHosts",
            Tags:    []string{"inframonitoring"},
            Summary: "List Hosts for Infra Monitoring",
-            Description: "Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary.",
+            Description: "Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (cpu, memory, wait, load15, diskUsage) return -1 as a sentinel when no data is available for that field.",
            Request:            new(inframonitoringtypes.PostableHosts),
            RequestContentType: "application/json",
            Response:           new(inframonitoringtypes.Hosts),
@@ -29,5 +29,24 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
        return err
    }

    if err := router.Handle("/api/v2/infra_monitoring/pods", handler.New(
        provider.authZ.ViewAccess(provider.infraMonitoringHandler.ListPods),
        handler.OpenAPIDef{
            ID:                  "ListPods",
            Tags:                []string{"inframonitoring"},
            Summary:             "List Pods for Infra Monitoring",
            Description:         "Returns a paginated list of Kubernetes pods with key metrics: CPU usage, CPU request/limit utilization, memory working set, memory request/limit utilization, current pod phase (pending/running/succeeded/failed/unknown), and pod age (ms since start time). Each pod includes metadata attributes (namespace, node, workload owner such as deployment/statefulset/daemonset/job/cronjob, cluster). Supports filtering via a filter expression, custom groupBy to aggregate pods by any attribute, ordering by any of the six metrics (cpu, cpu_request, cpu_limit, memory, memory_request, memory_limit), and pagination via offset/limit. The response type is 'list' for the default k8s.pod.uid grouping (each row is one pod with its current phase) or 'grouped_list' for custom groupBy keys (each row aggregates pods in the group with per-phase counts: pendingPodCount, runningPodCount, succeededPodCount, failedPodCount, unknownPodCount derived from each pod's latest phase in the window). Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (podCPU, podCPURequest, podCPULimit, podMemory, podMemoryRequest, podMemoryLimit, podAge) return -1 as a sentinel when no data is available for that field.",
            Request:             new(inframonitoringtypes.PostablePods),
            RequestContentType:  "application/json",
            Response:            new(inframonitoringtypes.Pods),
            ResponseContentType: "application/json",
            SuccessStatusCode:   http.StatusOK,
            ErrorStatusCodes:    []int{http.StatusBadRequest, http.StatusUnauthorized},
            Deprecated:          false,
            SecuritySchemes:     newSecuritySchemes(types.RoleViewer),
        })).Methods(http.MethodPost).GetError(); err != nil {
        return err
    }

    return nil
}
@@ -45,3 +45,27 @@ func (h *handler) ListHosts(rw http.ResponseWriter, req *http.Request) {

    render.Success(rw, http.StatusOK, result)
}

func (h *handler) ListPods(rw http.ResponseWriter, req *http.Request) {
    claims, err := authtypes.ClaimsFromContext(req.Context())
    if err != nil {
        render.Error(rw, err)
        return
    }

    orgID := valuer.MustNewUUID(claims.OrgID)

    var parsedReq inframonitoringtypes.PostablePods
    if err := binding.JSON.BindBody(req.Body, &parsedReq); err != nil {
        render.Error(rw, err)
        return
    }

    result, err := h.module.ListPods(req.Context(), orgID, &parsedReq)
    if err != nil {
        render.Error(rw, err)
        return
    }

    render.Success(rw, http.StatusOK, result)
}
@@ -14,3 +14,12 @@ type groupHostStatusCounts struct {
    Active   int
    Inactive int
}

// podPhaseCounts holds per-group pod counts bucketed by latest phase in window.
type podPhaseCounts struct {
    Pending   int
    Running   int
    Succeeded int
    Failed    int
    Unknown   int
}
@@ -159,3 +159,86 @@ func (m *module) ListHosts(ctx context.Context, orgID valuer.UUID, req *inframon

    return resp, nil
}

func (m *module) ListPods(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostablePods) (*inframonitoringtypes.Pods, error) {
    if err := req.Validate(); err != nil {
        return nil, err
    }

    resp := &inframonitoringtypes.Pods{}

    if req.OrderBy == nil {
        req.OrderBy = &qbtypes.OrderBy{
            Key: qbtypes.OrderByKey{
                TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
                    Name: inframonitoringtypes.PodsOrderByCPU,
                },
            },
            Direction: qbtypes.OrderDirectionDesc,
        }
    }

    if len(req.GroupBy) == 0 {
        req.GroupBy = []qbtypes.GroupByKey{podUIDGroupByKey}
        resp.Type = inframonitoringtypes.ResponseTypeList
    } else {
        resp.Type = inframonitoringtypes.ResponseTypeGroupedList
    }

    missingMetrics, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, podsTableMetricNamesList)
    if err != nil {
        return nil, err
    }
    if len(missingMetrics) > 0 {
        resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: missingMetrics}
        resp.Records = []inframonitoringtypes.PodRecord{}
        resp.Total = 0
        return resp, nil
    }
    if req.End < int64(minFirstReportedUnixMilli) {
        resp.EndTimeBeforeRetention = true
        resp.Records = []inframonitoringtypes.PodRecord{}
        resp.Total = 0
        return resp, nil
    }
    resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: []string{}}

    metadataMap, err := m.getPodsTableMetadata(ctx, req)
    if err != nil {
        return nil, err
    }

    resp.Total = len(metadataMap)

    pageGroups, err := m.getTopPodGroups(ctx, orgID, req, metadataMap)
    if err != nil {
        return nil, err
    }

    if len(pageGroups) == 0 {
        resp.Records = []inframonitoringtypes.PodRecord{}
        return resp, nil
    }

    filterExpr := ""
    if req.Filter != nil {
        filterExpr = req.Filter.Expression
    }

    fullQueryReq := buildFullQueryRequest(req.Start, req.End, filterExpr, req.GroupBy, pageGroups, m.newPodsTableListQuery())
    queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
    if err != nil {
        return nil, err
    }

    phaseCounts, err := m.getPerGroupPodPhaseCounts(ctx, req, pageGroups)
    if err != nil {
        return nil, err
    }

    isPodUIDInGroupBy := isKeyInGroupByAttrs(req.GroupBy, podUIDAttrKey)
    resp.Records = buildPodRecords(isPodUIDInGroupBy, queryResp, pageGroups, req.GroupBy, metadataMap, phaseCounts, req.End)
    resp.Warning = queryResp.Warning

    return resp, nil
}
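To make the two response modes concrete, a hedged sketch of the record shapes ListPods produces; the field values below are invented for illustration and only the fields relevant to each mode are shown.

package main

import "github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"

func main() {
    // list mode (no groupBy in the request): one record per pod, PodPhase is
    // set, and exactly one of the per-phase counts is 1.
    listRecord := inframonitoringtypes.PodRecord{
        PodUID:          "example-pod-uid",
        PodCPU:          0.42,
        PodCPURequest:   0.84, // utilization against the request
        PodCPULimit:     -1,   // -1 sentinel: no limit metric reported
        PodPhase:        inframonitoringtypes.PodPhaseRunning,
        RunningPodCount: 1,
        PodAge:          3_600_000, // ms between pod start time and req.End
        Meta:            map[string]interface{}{"k8s.namespace.name": "default"},
    }

    // grouped_list mode (custom groupBy): one record per group, PodPhase stays
    // PodPhaseNone, and the per-phase counts describe the whole group.
    groupedRecord := inframonitoringtypes.PodRecord{
        PodPhase:        inframonitoringtypes.PodPhaseNone,
        RunningPodCount: 12,
        PendingPodCount: 1,
        PodAge:          -1, // age is not derived when a group may span many pods
        Meta:            map[string]interface{}{"k8s.deployment.name": "frontend"},
    }

    _ = listRecord
    _ = groupedRecord
}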
334 pkg/modules/inframonitoring/implinframonitoring/pods.go (normal file)
@@ -0,0 +1,334 @@
package implinframonitoring

import (
    "context"
    "fmt"
    "slices"
    "strings"
    "time"

    "github.com/SigNoz/signoz/pkg/querybuilder"
    "github.com/SigNoz/signoz/pkg/telemetrymetrics"
    "github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
    "github.com/SigNoz/signoz/pkg/types/metrictypes"
    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/huandu/go-sqlbuilder"
)

// buildPodRecords assembles the page records. Phase counts come from
// phaseCounts in both modes. In list mode (isPodUIDInGroupBy=true) each
// group is one pod, so exactly one count is 1; PodPhase is derived from
// which one. In grouped_list mode PodPhase stays PodPhaseNone.
func buildPodRecords(
    isPodUIDInGroupBy bool,
    resp *qbtypes.QueryRangeResponse,
    pageGroups []map[string]string,
    groupBy []qbtypes.GroupByKey,
    metadataMap map[string]map[string]string,
    phaseCounts map[string]podPhaseCounts,
    reqEnd int64,
) []inframonitoringtypes.PodRecord {
    metricsMap := parseFullQueryResponse(resp, groupBy)

    records := make([]inframonitoringtypes.PodRecord, 0, len(pageGroups))
    for _, labels := range pageGroups {
        compositeKey := compositeKeyFromLabels(labels, groupBy)
        podUID := labels[podUIDAttrKey]

        record := inframonitoringtypes.PodRecord{ // initialize with default values
            PodUID:           podUID,
            PodPhase:         inframonitoringtypes.PodPhaseNone,
            PodCPU:           -1,
            PodCPURequest:    -1,
            PodCPULimit:      -1,
            PodMemory:        -1,
            PodMemoryRequest: -1,
            PodMemoryLimit:   -1,
            PodAge:           -1,
            Meta:             map[string]any{},
        }

        if metrics, ok := metricsMap[compositeKey]; ok {
            if v, exists := metrics["A"]; exists {
                record.PodCPU = v
            }
            if v, exists := metrics["B"]; exists {
                record.PodCPURequest = v
            }
            if v, exists := metrics["C"]; exists {
                record.PodCPULimit = v
            }
            if v, exists := metrics["D"]; exists {
                record.PodMemory = v
            }
            if v, exists := metrics["E"]; exists {
                record.PodMemoryRequest = v
            }
            if v, exists := metrics["F"]; exists {
                record.PodMemoryLimit = v
            }
        }

        if phaseCountsForGroup, ok := phaseCounts[compositeKey]; ok {
            record.PendingPodCount = phaseCountsForGroup.Pending
            record.RunningPodCount = phaseCountsForGroup.Running
            record.SucceededPodCount = phaseCountsForGroup.Succeeded
            record.FailedPodCount = phaseCountsForGroup.Failed
            record.UnknownPodCount = phaseCountsForGroup.Unknown

            // In list mode each group is one pod; the count==1 bucket identifies the phase.
            if isPodUIDInGroupBy {
                switch {
                case phaseCountsForGroup.Pending == 1:
                    record.PodPhase = inframonitoringtypes.PodPhasePending
                case phaseCountsForGroup.Running == 1:
                    record.PodPhase = inframonitoringtypes.PodPhaseRunning
                case phaseCountsForGroup.Succeeded == 1:
                    record.PodPhase = inframonitoringtypes.PodPhaseSucceeded
                case phaseCountsForGroup.Failed == 1:
                    record.PodPhase = inframonitoringtypes.PodPhaseFailed
                case phaseCountsForGroup.Unknown == 1:
                    record.PodPhase = inframonitoringtypes.PodPhaseUnknown
                }
            }
        }

        if attrs, ok := metadataMap[compositeKey]; ok && isPodUIDInGroupBy {
            // Deduce age only when the pod UID is in the group by: without it a
            // group may span multiple pod UIDs, and a single podAge would not
            // make sense.
            if startTimeStr, exists := attrs[podStartTimeAttrKey]; exists && startTimeStr != "" {
                if t, err := time.Parse(time.RFC3339, startTimeStr); err == nil {
                    startTimeMs := t.UnixMilli()
                    if startTimeMs > 0 {
                        record.PodAge = reqEnd - startTimeMs
                    }
                }
            }
            for k, v := range attrs {
                record.Meta[k] = v
            }
        }

        records = append(records, record)
    }
    return records
}

func (m *module) getTopPodGroups(
    ctx context.Context,
    orgID valuer.UUID,
    req *inframonitoringtypes.PostablePods,
    metadataMap map[string]map[string]string,
) ([]map[string]string, error) {
    orderByKey := req.OrderBy.Key.Name
    queryNamesForOrderBy := orderByToPodsQueryNames[orderByKey]
    rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]

    topReq := &qbtypes.QueryRangeRequest{
        Start:       uint64(req.Start),
        End:         uint64(req.End),
        RequestType: qbtypes.RequestTypeScalar,
        CompositeQuery: qbtypes.CompositeQuery{
            Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
        },
    }

    for _, envelope := range m.newPodsTableListQuery().CompositeQuery.Queries {
        if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
            continue
        }
        copied := envelope
        if copied.Type == qbtypes.QueryTypeBuilder {
            existingExpr := ""
            if f := copied.GetFilter(); f != nil {
                existingExpr = f.Expression
            }
            reqFilterExpr := ""
            if req.Filter != nil {
                reqFilterExpr = req.Filter.Expression
            }
            merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
            copied.SetFilter(&qbtypes.Filter{Expression: merged})
            copied.SetGroupBy(req.GroupBy)
        }
        topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
    }

    resp, err := m.querier.QueryRange(ctx, orgID, topReq)
    if err != nil {
        return nil, err
    }

    allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
    return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
}

func (m *module) getPodsTableMetadata(ctx context.Context, req *inframonitoringtypes.PostablePods) (map[string]map[string]string, error) {
    var nonGroupByAttrs []string
    for _, key := range podAttrKeysForMetadata {
        if !isKeyInGroupByAttrs(req.GroupBy, key) {
            nonGroupByAttrs = append(nonGroupByAttrs, key)
        }
    }
    return m.getMetadata(ctx, podsTableMetricNamesList, req.GroupBy, nonGroupByAttrs, req.Filter, req.Start, req.End)
}

// getPerGroupPodPhaseCounts computes per-group pod counts bucketed by each
// pod's latest phase in the requested window.
// Pipeline:
//
//	timeSeriesFPs: fp ↔ (pod_uid, groupBy cols) from the time_series table.
//	  User filter + page-groups filter applied here.
//	latestPhasePerPod: INNER JOIN samples × timeSeriesFPs, collapsed to
//	  the latest phase per pod via argMax(value, unix_milli).
//	countPodsPerPhase: per-group uniqExactIf into 5 phase buckets.
//
// Groups absent from the result map have implicit zero counts (caller default).
func (m *module) getPerGroupPodPhaseCounts(
    ctx context.Context,
    req *inframonitoringtypes.PostablePods,
    pageGroups []map[string]string,
) (map[string]podPhaseCounts, error) {
    if len(pageGroups) == 0 || len(req.GroupBy) == 0 {
        return map[string]podPhaseCounts{}, nil
    }

    // Merged filter expression (user filter + page-groups IN clauses).
    reqFilterExpr := ""
    if req.Filter != nil {
        reqFilterExpr = req.Filter.Expression
    }
    pageGroupsFilterExpr := buildPageGroupsFilterExpr(pageGroups)
    filterExpr := mergeFilterExpressions(reqFilterExpr, pageGroupsFilterExpr)

    // Resolve tables. Same convention as hosts (distributed names from helpers).
    adjustedStart, adjustedEnd, _, localTimeSeriesTable := telemetrymetrics.WhichTSTableToUse(
        uint64(req.Start), uint64(req.End), nil,
    )
    samplesTable := telemetrymetrics.WhichSamplesTableToUse(
        uint64(req.Start), uint64(req.End),
        metrictypes.UnspecifiedType, metrictypes.TimeAggregationUnspecified, nil,
    )
    valueCol := telemetrymetrics.ValueColumnForSamplesTable(samplesTable)

    // ----- timeSeriesFPs -----
    timeSeriesFPs := sqlbuilder.NewSelectBuilder()
    timeSeriesFPsSelectCols := []string{
        "fingerprint",
        fmt.Sprintf("JSONExtractString(labels, %s) AS pod_uid", timeSeriesFPs.Var(podUIDAttrKey)),
    }
    for _, key := range req.GroupBy {
        timeSeriesFPsSelectCols = append(timeSeriesFPsSelectCols,
            fmt.Sprintf("JSONExtractString(labels, %s) AS %s", timeSeriesFPs.Var(key.Name), quoteIdentifier(key.Name)),
        )
    }
    timeSeriesFPs.Select(timeSeriesFPsSelectCols...)
    timeSeriesFPs.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, localTimeSeriesTable))
    timeSeriesFPs.Where(
        timeSeriesFPs.E("metric_name", podPhaseMetricName),
        timeSeriesFPs.GE("unix_milli", adjustedStart),
        timeSeriesFPs.L("unix_milli", adjustedEnd),
    )
    if filterExpr != "" {
        filterClause, err := m.buildFilterClause(ctx, &qbtypes.Filter{Expression: filterExpr}, req.Start, req.End)
        if err != nil {
            return nil, err
        }
        if filterClause != nil {
            timeSeriesFPs.AddWhereClause(filterClause)
        }
    }
    timeSeriesFPsGroupBy := []string{"fingerprint", "pod_uid"}
    for _, key := range req.GroupBy {
        timeSeriesFPsGroupBy = append(timeSeriesFPsGroupBy, quoteIdentifier(key.Name))
    }
    timeSeriesFPs.GroupBy(timeSeriesFPsGroupBy...)
    timeSeriesFPsSQL, timeSeriesFPsArgs := timeSeriesFPs.BuildWithFlavor(sqlbuilder.ClickHouse)

    // ----- latestPhasePerPod -----
    latestPhasePerPod := sqlbuilder.NewSelectBuilder()
    latestPhasePerPodSelectCols := []string{"tsfp.pod_uid AS pod_uid"}
    latestPhasePerPodGroupBy := []string{"pod_uid"}
    for _, key := range req.GroupBy {
        col := quoteIdentifier(key.Name)
        latestPhasePerPodSelectCols = append(latestPhasePerPodSelectCols, fmt.Sprintf("tsfp.%s AS %s", col, col))
        latestPhasePerPodGroupBy = append(latestPhasePerPodGroupBy, col)
    }
    latestPhasePerPodSelectCols = append(latestPhasePerPodSelectCols,
        fmt.Sprintf("argMax(samples.%s, samples.unix_milli) AS phase_value", valueCol),
    )
    latestPhasePerPod.Select(latestPhasePerPodSelectCols...)
    latestPhasePerPod.From(fmt.Sprintf(
        "%s.%s AS samples INNER JOIN time_series_fps AS tsfp ON samples.fingerprint = tsfp.fingerprint",
        telemetrymetrics.DBName, samplesTable,
    ))
    latestPhasePerPod.Where(
        latestPhasePerPod.E("samples.metric_name", podPhaseMetricName),
        latestPhasePerPod.GE("samples.unix_milli", req.Start),
        latestPhasePerPod.L("samples.unix_milli", req.End),
        "tsfp.pod_uid != ''",
    )
    latestPhasePerPod.GroupBy(latestPhasePerPodGroupBy...)
    latestPhasePerPodSQL, latestPhasePerPodArgs := latestPhasePerPod.BuildWithFlavor(sqlbuilder.ClickHouse)

    // ----- countPodsPerPhase (outer SELECT) -----
    countPodsPerPhaseSelectCols := make([]string, 0, len(req.GroupBy)+5)
    countPodsPerPhaseGroupBy := make([]string, 0, len(req.GroupBy))
    for _, key := range req.GroupBy {
        col := quoteIdentifier(key.Name)
        countPodsPerPhaseSelectCols = append(countPodsPerPhaseSelectCols, col)
        countPodsPerPhaseGroupBy = append(countPodsPerPhaseGroupBy, col)
    }
    countPodsPerPhaseSelectCols = append(countPodsPerPhaseSelectCols,
        fmt.Sprintf("uniqExactIf(pod_uid, phase_value = %d) AS pending_count", inframonitoringtypes.PodPhaseNumPending),
        fmt.Sprintf("uniqExactIf(pod_uid, phase_value = %d) AS running_count", inframonitoringtypes.PodPhaseNumRunning),
        fmt.Sprintf("uniqExactIf(pod_uid, phase_value = %d) AS succeeded_count", inframonitoringtypes.PodPhaseNumSucceeded),
        fmt.Sprintf("uniqExactIf(pod_uid, phase_value = %d) AS failed_count", inframonitoringtypes.PodPhaseNumFailed),
        fmt.Sprintf("uniqExactIf(pod_uid, phase_value = %d) AS unknown_count", inframonitoringtypes.PodPhaseNumUnknown),
    )
    countPodsPerPhaseSQL := fmt.Sprintf(
        "SELECT %s FROM latest_phase_per_pod GROUP BY %s",
        strings.Join(countPodsPerPhaseSelectCols, ", "),
        strings.Join(countPodsPerPhaseGroupBy, ", "),
    )

    // Combine CTEs + outer.
    cteFragments := []string{
        fmt.Sprintf("time_series_fps AS (%s)", timeSeriesFPsSQL),
        fmt.Sprintf("latest_phase_per_pod AS (%s)", latestPhasePerPodSQL),
    }
    finalSQL := querybuilder.CombineCTEs(cteFragments) + countPodsPerPhaseSQL
    finalArgs := querybuilder.PrependArgs([][]any{timeSeriesFPsArgs, latestPhasePerPodArgs}, nil)

    rows, err := m.telemetryStore.ClickhouseDB().Query(ctx, finalSQL, finalArgs...)
    if err != nil {
        return nil, err
    }
    defer rows.Close()

    result := make(map[string]podPhaseCounts)
    for rows.Next() {
        groupVals := make([]string, len(req.GroupBy))
        scanPtrs := make([]any, 0, len(req.GroupBy)+5)
        for i := range groupVals {
            scanPtrs = append(scanPtrs, &groupVals[i])
        }
        var pending, running, succeeded, failed, unknown uint64
        scanPtrs = append(scanPtrs, &pending, &running, &succeeded, &failed, &unknown)

        if err := rows.Scan(scanPtrs...); err != nil {
            return nil, err
        }
        result[compositeKeyFromList(groupVals)] = podPhaseCounts{
            Pending:   int(pending),
            Running:   int(running),
            Succeeded: int(succeeded),
            Failed:    int(failed),
            Unknown:   int(unknown),
        }
    }
    if err := rows.Err(); err != nil {
        return nil, err
    }
    return result, nil
}
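For readers who want the end result rather than the builder calls, a rough Go sketch of the SQL that getPerGroupPodPhaseCounts assembles for a single `k8s.node.name` groupBy key. The concrete table names, the value column ("value" vs "last"), and the appended filter clauses are assumptions here; at runtime they come from the helpers above.

// Illustrative only: approximate shape of the assembled ClickHouse query.
const phaseCountsQueryShape = `
WITH
  time_series_fps AS (
    SELECT fingerprint,
           JSONExtractString(labels, 'k8s.pod.uid') AS pod_uid,
           JSONExtractString(labels, 'k8s.node.name') AS "k8s.node.name"
    FROM signoz_metrics.time_series_v4          -- actual table chosen by WhichTSTableToUse
    WHERE metric_name = 'k8s.pod.phase'
      AND unix_milli >= ? AND unix_milli < ?
      -- user filter + page-groups IN clauses appended here
    GROUP BY fingerprint, pod_uid, "k8s.node.name"
  ),
  latest_phase_per_pod AS (
    SELECT tsfp.pod_uid AS pod_uid,
           tsfp."k8s.node.name" AS "k8s.node.name",
           argMax(samples.value, samples.unix_milli) AS phase_value  -- "last" on agg tables
    FROM signoz_metrics.distributed_samples_v4 AS samples  -- chosen by WhichSamplesTableToUse
    INNER JOIN time_series_fps AS tsfp ON samples.fingerprint = tsfp.fingerprint
    WHERE samples.metric_name = 'k8s.pod.phase'
      AND samples.unix_milli >= ? AND samples.unix_milli < ?
      AND tsfp.pod_uid != ''
    GROUP BY pod_uid, "k8s.node.name"
  )
SELECT "k8s.node.name",
       uniqExactIf(pod_uid, phase_value = 1) AS pending_count,
       uniqExactIf(pod_uid, phase_value = 2) AS running_count,
       uniqExactIf(pod_uid, phase_value = 3) AS succeeded_count,
       uniqExactIf(pod_uid, phase_value = 4) AS failed_count,
       uniqExactIf(pod_uid, phase_value = 5) AS unknown_count
FROM latest_phase_per_pod
GROUP BY "k8s.node.name"`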
@@ -0,0 +1,178 @@
package implinframonitoring

import (
    "github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
    "github.com/SigNoz/signoz/pkg/types/metrictypes"
    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

const (
    podUIDAttrKey       = "k8s.pod.uid"
    podStartTimeAttrKey = "k8s.pod.start_time"
    podPhaseMetricName  = "k8s.pod.phase"
)

var podUIDGroupByKey = qbtypes.GroupByKey{
    TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
        Name:          podUIDAttrKey,
        FieldContext:  telemetrytypes.FieldContextResource,
        FieldDataType: telemetrytypes.FieldDataTypeString,
    },
}

var podsTableMetricNamesList = []string{
    "k8s.pod.cpu.usage",
    "k8s.pod.cpu_request_utilization",
    "k8s.pod.cpu_limit_utilization",
    "k8s.pod.memory.working_set",
    "k8s.pod.memory_request_utilization",
    "k8s.pod.memory_limit_utilization",
    "k8s.pod.phase",
}

var podAttrKeysForMetadata = []string{
    "k8s.pod.uid",
    "k8s.pod.name",
    "k8s.namespace.name",
    "k8s.node.name",
    "k8s.deployment.name",
    "k8s.statefulset.name",
    "k8s.daemonset.name",
    "k8s.job.name",
    "k8s.cronjob.name",
    "k8s.cluster.name",
    "k8s.pod.start_time",
}

var orderByToPodsQueryNames = map[string][]string{
    inframonitoringtypes.PodsOrderByCPU:           {"A"},
    inframonitoringtypes.PodsOrderByCPURequest:    {"B"},
    inframonitoringtypes.PodsOrderByCPULimit:      {"C"},
    inframonitoringtypes.PodsOrderByMemory:        {"D"},
    inframonitoringtypes.PodsOrderByMemoryRequest: {"E"},
    inframonitoringtypes.PodsOrderByMemoryLimit:   {"F"},
}

// newPodsTableListQuery builds the composite QB v5 request for the pods list.
// Pod phase is derived separately via getPerGroupPodPhaseCounts (works for both
// list and grouped_list modes), so no phase query is included here.
func (m *module) newPodsTableListQuery() *qbtypes.QueryRangeRequest {
    queries := []qbtypes.QueryEnvelope{
        // Query A: CPU usage
        {
            Type: qbtypes.QueryTypeBuilder,
            Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                Name:   "A",
                Signal: telemetrytypes.SignalMetrics,
                Aggregations: []qbtypes.MetricAggregation{
                    {
                        MetricName:       "k8s.pod.cpu.usage",
                        TimeAggregation:  metrictypes.TimeAggregationAvg,
                        SpaceAggregation: metrictypes.SpaceAggregationSum,
                        ReduceTo:         qbtypes.ReduceToAvg,
                    },
                },
                GroupBy:  []qbtypes.GroupByKey{podUIDGroupByKey},
                Disabled: false,
            },
        },
        // Query B: CPU request utilization
        {
            Type: qbtypes.QueryTypeBuilder,
            Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                Name:   "B",
                Signal: telemetrytypes.SignalMetrics,
                Aggregations: []qbtypes.MetricAggregation{
                    {
                        MetricName:       "k8s.pod.cpu_request_utilization",
                        TimeAggregation:  metrictypes.TimeAggregationAvg,
                        SpaceAggregation: metrictypes.SpaceAggregationAvg,
                        ReduceTo:         qbtypes.ReduceToAvg,
                    },
                },
                GroupBy:  []qbtypes.GroupByKey{podUIDGroupByKey},
                Disabled: false,
            },
        },
        // Query C: CPU limit utilization
        {
            Type: qbtypes.QueryTypeBuilder,
            Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                Name:   "C",
                Signal: telemetrytypes.SignalMetrics,
                Aggregations: []qbtypes.MetricAggregation{
                    {
                        MetricName:       "k8s.pod.cpu_limit_utilization",
                        TimeAggregation:  metrictypes.TimeAggregationAvg,
                        SpaceAggregation: metrictypes.SpaceAggregationAvg,
                        ReduceTo:         qbtypes.ReduceToAvg,
                    },
                },
                GroupBy:  []qbtypes.GroupByKey{podUIDGroupByKey},
                Disabled: false,
            },
        },
        // Query D: Memory working set
        {
            Type: qbtypes.QueryTypeBuilder,
            Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                Name:   "D",
                Signal: telemetrytypes.SignalMetrics,
                Aggregations: []qbtypes.MetricAggregation{
                    {
                        MetricName:       "k8s.pod.memory.working_set",
                        TimeAggregation:  metrictypes.TimeAggregationAvg,
                        SpaceAggregation: metrictypes.SpaceAggregationSum,
                        ReduceTo:         qbtypes.ReduceToAvg,
                    },
                },
                GroupBy:  []qbtypes.GroupByKey{podUIDGroupByKey},
                Disabled: false,
            },
        },
        // Query E: Memory request utilization
        {
            Type: qbtypes.QueryTypeBuilder,
            Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                Name:   "E",
                Signal: telemetrytypes.SignalMetrics,
                Aggregations: []qbtypes.MetricAggregation{
                    {
                        MetricName:       "k8s.pod.memory_request_utilization",
                        TimeAggregation:  metrictypes.TimeAggregationAvg,
                        SpaceAggregation: metrictypes.SpaceAggregationAvg,
                        ReduceTo:         qbtypes.ReduceToAvg,
                    },
                },
                GroupBy:  []qbtypes.GroupByKey{podUIDGroupByKey},
                Disabled: false,
            },
        },
        // Query F: Memory limit utilization
        {
            Type: qbtypes.QueryTypeBuilder,
            Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                Name:   "F",
                Signal: telemetrytypes.SignalMetrics,
                Aggregations: []qbtypes.MetricAggregation{
                    {
                        MetricName:       "k8s.pod.memory_limit_utilization",
                        TimeAggregation:  metrictypes.TimeAggregationAvg,
                        SpaceAggregation: metrictypes.SpaceAggregationAvg,
                        ReduceTo:         qbtypes.ReduceToAvg,
                    },
                },
                GroupBy:  []qbtypes.GroupByKey{podUIDGroupByKey},
                Disabled: false,
            },
        },
    }

    return &qbtypes.QueryRangeRequest{
        RequestType: qbtypes.RequestTypeScalar,
        CompositeQuery: qbtypes.CompositeQuery{
            Queries: queries,
        },
    }
}
@@ -10,8 +10,10 @@ import (

type Handler interface {
    ListHosts(http.ResponseWriter, *http.Request)
    ListPods(http.ResponseWriter, *http.Request)
}

type Module interface {
    ListHosts(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableHosts) (*inframonitoringtypes.Hosts, error)
    ListPods(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostablePods) (*inframonitoringtypes.Pods, error)
}
@@ -124,6 +124,17 @@ func CountExpressionForSamplesTable(tableName string) string {
    return "sum(count)"
}

// ValueColumnForSamplesTable returns the column name holding the sample value:
// "last" for the 5m/30m aggregated tables, "value" otherwise.
// All other columns in the aggregated samples tables hold aggregations,
// so "last" is the value column for those tables.
func ValueColumnForSamplesTable(tableName string) string {
    if tableName == SamplesV4Agg5mTableName || tableName == SamplesV4Agg30mTableName {
        return "last"
    }
    return "value"
}

// start and end are in milliseconds
// we have three tables for samples
// 1. distributed_samples_v4
@@ -49,7 +49,7 @@ type HostFilter struct {
    FilterByStatus HostStatus `json:"filterByStatus"`
}

-// Validate ensures HostsListRequest contains acceptable values.
+// Validate ensures PostableHosts contains acceptable values.
func (req *PostableHosts) Validate() error {
    if req == nil {
        return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
109 pkg/types/inframonitoringtypes/pods.go (normal file)
@@ -0,0 +1,109 @@
|
||||
package inframonitoringtypes
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"slices"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
)
|
||||
|
||||
type Pods struct {
|
||||
Type ResponseType `json:"type" required:"true"`
|
||||
Records []PodRecord `json:"records" required:"true"`
|
||||
Total int `json:"total" required:"true"`
|
||||
RequiredMetricsCheck RequiredMetricsCheck `json:"requiredMetricsCheck" required:"true"`
|
||||
EndTimeBeforeRetention bool `json:"endTimeBeforeRetention" required:"true"`
|
||||
Warning *qbtypes.QueryWarnData `json:"warning,omitempty"`
|
||||
}
|
||||
|
||||
type PodRecord struct {
|
||||
PodUID string `json:"podUID" required:"true"`
|
||||
PodCPU float64 `json:"podCPU" required:"true"`
|
||||
PodCPURequest float64 `json:"podCPURequest" required:"true"`
|
||||
PodCPULimit float64 `json:"podCPULimit" required:"true"`
|
||||
PodMemory float64 `json:"podMemory" required:"true"`
|
||||
PodMemoryRequest float64 `json:"podMemoryRequest" required:"true"`
|
||||
PodMemoryLimit float64 `json:"podMemoryLimit" required:"true"`
|
||||
PodPhase PodPhase `json:"podPhase" required:"true"`
|
||||
PendingPodCount int `json:"pendingPodCount" required:"true"`
|
||||
RunningPodCount int `json:"runningPodCount" required:"true"`
|
||||
SucceededPodCount int `json:"succeededPodCount" required:"true"`
|
||||
FailedPodCount int `json:"failedPodCount" required:"true"`
|
||||
UnknownPodCount int `json:"unknownPodCount" required:"true"`
|
||||
PodAge int64 `json:"podAge" required:"true"`
|
||||
Meta map[string]interface{} `json:"meta" required:"true"`
|
||||
}
|
||||
|
||||
// PostablePods is the request body for the v2 pods list API.
|
||||
type PostablePods struct {
|
||||
Start int64 `json:"start" required:"true"`
|
||||
End int64 `json:"end" required:"true"`
|
||||
Filter *qbtypes.Filter `json:"filter"`
|
||||
GroupBy []qbtypes.GroupByKey `json:"groupBy"`
|
||||
OrderBy *qbtypes.OrderBy `json:"orderBy"`
|
||||
Offset int `json:"offset"`
|
||||
Limit int `json:"limit" required:"true"`
|
||||
}
|
||||
|
||||
// Validate ensures PostablePods contains acceptable values.
|
||||
func (req *PostablePods) Validate() error {
|
||||
if req == nil {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
|
||||
}
|
||||
|
||||
if req.Start <= 0 {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid start time %d: start must be greater than 0",
|
||||
req.Start,
|
||||
)
|
||||
}
|
||||
|
||||
if req.End <= 0 {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid end time %d: end must be greater than 0",
|
||||
req.End,
|
||||
)
|
||||
}
|
||||
|
||||
if req.Start >= req.End {
|
||||
return errors.NewInvalidInputf(
|
||||
errors.CodeInvalidInput,
|
||||
"invalid time range: start (%d) must be less than end (%d)",
|
||||
req.Start,
|
||||
req.End,
|
||||
)
|
||||
}
|
||||
|
||||
if req.Limit < 1 || req.Limit > 5000 {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be between 1 and 5000")
|
||||
}
|
||||
|
||||
if req.Offset < 0 {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "offset cannot be negative")
|
||||
}
|
||||
|
||||
if req.OrderBy != nil {
|
||||
if !slices.Contains(PodsValidOrderByKeys, req.OrderBy.Key.Name) {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by key: %s", req.OrderBy.Key.Name)
|
||||
}
|
||||
if req.OrderBy.Direction != qbtypes.OrderDirectionAsc && req.OrderBy.Direction != qbtypes.OrderDirectionDesc {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by direction: %s", req.OrderBy.Direction)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON validates input immediately after decoding.
|
||||
func (req *PostablePods) UnmarshalJSON(data []byte) error {
|
||||
type raw PostablePods
|
||||
var decoded raw
|
||||
if err := json.Unmarshal(data, &decoded); err != nil {
|
||||
return err
|
||||
}
|
||||
*req = PostablePods(decoded)
|
||||
return req.Validate()
|
||||
}
|
||||
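The `type raw PostablePods` alias is what makes this safe: raw has the same fields but none of the methods, so json.Unmarshal on it cannot recurse back into this UnmarshalJSON. A hedged usage sketch (hypothetical example function; assumes fmt is imported alongside encoding/json):

func ExamplePostablePods_unmarshalRejectsBadRange() {
    var req PostablePods
    // start > end, so Validate fails during decoding itself.
    err := json.Unmarshal([]byte(`{"start":2000,"end":1000,"limit":100}`), &req)
    fmt.Println(err != nil)
    // Output: true
}
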
55
pkg/types/inframonitoringtypes/pods_constants.go
Normal file
@@ -0,0 +1,55 @@
package inframonitoringtypes

import "github.com/SigNoz/signoz/pkg/valuer"

type PodPhase struct {
    valuer.String
}

var (
    PodPhasePending   = PodPhase{valuer.NewString("pending")}
    PodPhaseRunning   = PodPhase{valuer.NewString("running")}
    PodPhaseSucceeded = PodPhase{valuer.NewString("succeeded")}
    PodPhaseFailed    = PodPhase{valuer.NewString("failed")}
    PodPhaseUnknown   = PodPhase{valuer.NewString("unknown")}
    PodPhaseNone      = PodPhase{valuer.NewString("")}
)

func (PodPhase) Enum() []any {
    return []any{
        PodPhasePending,
        PodPhaseRunning,
        PodPhaseSucceeded,
        PodPhaseFailed,
        PodPhaseUnknown,
        PodPhaseNone,
    }
}

// Numeric pod phase values emitted by the k8s.pod.phase metric
// (source: OTel kubeletstats receiver).
const (
    PodPhaseNumPending   = 1
    PodPhaseNumRunning   = 2
    PodPhaseNumSucceeded = 3
    PodPhaseNumFailed    = 4
    PodPhaseNumUnknown   = 5
)

const (
    PodsOrderByCPU           = "cpu"
    PodsOrderByCPURequest    = "cpu_request"
    PodsOrderByCPULimit      = "cpu_limit"
    PodsOrderByMemory        = "memory"
    PodsOrderByMemoryRequest = "memory_request"
    PodsOrderByMemoryLimit   = "memory_limit"
)

var PodsValidOrderByKeys = []string{
    PodsOrderByCPU,
    PodsOrderByCPURequest,
    PodsOrderByCPULimit,
    PodsOrderByMemory,
    PodsOrderByMemoryRequest,
    PodsOrderByMemoryLimit,
}

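Since the numeric constants line up one-to-one with the string-backed phases, translating a k8s.pod.phase sample into a PodPhase is a plain switch. A sketch of such a helper (hypothetical, not in this diff; same package assumed):

// podPhaseFromNum maps the numeric k8s.pod.phase value reported by the
// kubeletstats receiver onto the string-backed PodPhase, falling back
// to PodPhaseNone for anything outside the documented 1-5 range.
func podPhaseFromNum(n int) PodPhase {
    switch n {
    case PodPhaseNumPending:
        return PodPhasePending
    case PodPhaseNumRunning:
        return PodPhaseRunning
    case PodPhaseNumSucceeded:
        return PodPhaseSucceeded
    case PodPhaseNumFailed:
        return PodPhaseFailed
    case PodPhaseNumUnknown:
        return PodPhaseUnknown
    default:
        return PodPhaseNone
    }
}
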
219
pkg/types/inframonitoringtypes/pods_test.go
Normal file
@@ -0,0 +1,219 @@
package inframonitoringtypes

import (
    "testing"

    "github.com/SigNoz/signoz/pkg/errors"
    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/stretchr/testify/require"
)

func TestPostablePods_Validate(t *testing.T) {
    tests := []struct {
        name    string
        req     *PostablePods
        wantErr bool
    }{
        {
            name: "valid request",
            req: &PostablePods{
                Start:  1000,
                End:    2000,
                Limit:  100,
                Offset: 0,
            },
            wantErr: false,
        },
        {
            name:    "nil request",
            req:     nil,
            wantErr: true,
        },
        {
            name: "start time zero",
            req: &PostablePods{
                Start:  0,
                End:    2000,
                Limit:  100,
                Offset: 0,
            },
            wantErr: true,
        },
        {
            name: "start time negative",
            req: &PostablePods{
                Start:  -1000,
                End:    2000,
                Limit:  100,
                Offset: 0,
            },
            wantErr: true,
        },
        {
            name: "end time zero",
            req: &PostablePods{
                Start:  1000,
                End:    0,
                Limit:  100,
                Offset: 0,
            },
            wantErr: true,
        },
        {
            name: "start time greater than end time",
            req: &PostablePods{
                Start:  2000,
                End:    1000,
                Limit:  100,
                Offset: 0,
            },
            wantErr: true,
        },
        {
            name: "start time equal to end time",
            req: &PostablePods{
                Start:  1000,
                End:    1000,
                Limit:  100,
                Offset: 0,
            },
            wantErr: true,
        },
        {
            name: "limit zero",
            req: &PostablePods{
                Start:  1000,
                End:    2000,
                Limit:  0,
                Offset: 0,
            },
            wantErr: true,
        },
        {
            name: "limit negative",
            req: &PostablePods{
                Start:  1000,
                End:    2000,
                Limit:  -10,
                Offset: 0,
            },
            wantErr: true,
        },
        {
            name: "limit exceeds max",
            req: &PostablePods{
                Start:  1000,
                End:    2000,
                Limit:  5001,
                Offset: 0,
            },
            wantErr: true,
        },
        {
            name: "offset negative",
            req: &PostablePods{
                Start:  1000,
                End:    2000,
                Limit:  100,
                Offset: -5,
            },
            wantErr: true,
        },
        {
            name: "orderBy nil is valid",
            req: &PostablePods{
                Start:  1000,
                End:    2000,
                Limit:  100,
                Offset: 0,
            },
            wantErr: false,
        },
        {
            name: "orderBy with valid key cpu and direction asc",
            req: &PostablePods{
                Start:  1000,
                End:    2000,
                Limit:  100,
                Offset: 0,
                OrderBy: &qbtypes.OrderBy{
                    Key: qbtypes.OrderByKey{
                        TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
                            Name: PodsOrderByCPU,
                        },
                    },
                    Direction: qbtypes.OrderDirectionAsc,
                },
            },
            wantErr: false,
        },
        {
            name: "orderBy with phase key is rejected",
            req: &PostablePods{
                Start:  1000,
                End:    2000,
                Limit:  100,
                Offset: 0,
                OrderBy: &qbtypes.OrderBy{
                    Key: qbtypes.OrderByKey{
                        TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
                            Name: "phase",
                        },
                    },
                    Direction: qbtypes.OrderDirectionDesc,
                },
            },
            wantErr: true,
        },
        {
            name: "orderBy with invalid key",
            req: &PostablePods{
                Start:  1000,
                End:    2000,
                Limit:  100,
                Offset: 0,
                OrderBy: &qbtypes.OrderBy{
                    Key: qbtypes.OrderByKey{
                        TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
                            Name: "unknown",
                        },
                    },
                    Direction: qbtypes.OrderDirectionDesc,
                },
            },
            wantErr: true,
        },
        {
            name: "orderBy with valid key but invalid direction",
            req: &PostablePods{
                Start:  1000,
                End:    2000,
                Limit:  100,
                Offset: 0,
                OrderBy: &qbtypes.OrderBy{
                    Key: qbtypes.OrderByKey{
                        TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
                            Name: PodsOrderByMemory,
                        },
                    },
                    Direction: qbtypes.OrderDirection{String: valuer.NewString("invalid")},
                },
            },
            wantErr: true,
        },
    }

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            err := tt.req.Validate()
            if tt.wantErr {
                require.Error(t, err)
                require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
            } else {
                require.NoError(t, err)
            }
        })
    }
}

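These table-driven cases all funnel through Validate, so the package can be exercised in isolation with the standard test runner:

go test ./pkg/types/inframonitoringtypes/ -run TestPostablePods_Validate -v
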
@@ -11,9 +11,7 @@ import (
    "github.com/uptrace/bun"
)

var (
    ErrCodeInvalidPlannedMaintenancePayload = errors.MustNewCode("invalid_planned_maintenance_payload")
)
var ErrCodeInvalidPlannedMaintenancePayload = errors.MustNewCode("invalid_planned_maintenance_payload")

type MaintenanceStatus struct {
    valuer.String
@@ -63,15 +61,15 @@ type StorablePlannedMaintenance struct {
}

type PlannedMaintenance struct {
    ID          valuer.UUID `json:"id" required:"true"`
    Name        string      `json:"name" required:"true"`
    Description string      `json:"description"`
    Schedule    *Schedule   `json:"schedule" required:"true"`
    RuleIDs     []string    `json:"alertIds"`
    CreatedAt   time.Time   `json:"createdAt"`
    CreatedBy   string      `json:"createdBy"`
    UpdatedAt   time.Time   `json:"updatedAt"`
    UpdatedBy   string      `json:"updatedBy"`
    ID          valuer.UUID       `json:"id" required:"true"`
    Name        string            `json:"name" required:"true"`
    Description string            `json:"description"`
    Schedule    *Schedule         `json:"schedule" required:"true"`
    RuleIDs     []string          `json:"alertIds"`
    CreatedAt   time.Time         `json:"createdAt"`
    CreatedBy   string            `json:"createdBy"`
    UpdatedAt   time.Time         `json:"updatedAt"`
    UpdatedBy   string            `json:"updatedBy"`
    Status      MaintenanceStatus `json:"status" required:"true"`
    Kind        MaintenanceKind   `json:"kind" required:"true"`
}
@@ -161,8 +159,11 @@ func (m *PlannedMaintenance) ShouldSkip(ruleID string, now time.Time) bool {

    currentTime := now.In(loc)

    // fixed schedule
    if !m.Schedule.StartTime.IsZero() && !m.Schedule.EndTime.IsZero() {
    // fixed schedule — only when no recurrence is configured.
    // When recurrence is set, the recurring check below handles everything;
    // falling through here would cause the window to match the absolute
    // StartTime–EndTime range instead of the daily/weekly/monthly pattern.
    if m.Schedule.Recurrence == nil && !m.Schedule.StartTime.IsZero() && !m.Schedule.EndTime.IsZero() {
        startTime := m.Schedule.StartTime.In(loc)
        endTime := m.Schedule.EndTime.In(loc)
        if currentTime.Equal(startTime) || currentTime.Equal(endTime) ||

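Reduced to its skeleton, the fix makes the two window checks mutually exclusive. A sketch only; inRecurringWindow and inFixedWindow are hypothetical names standing in for the inline logic of ShouldSkip:

// Recurrence, when configured, is the single source of truth for the
// maintenance window; the absolute StartTime/EndTime pair only applies
// when no recurrence exists.
if m.Schedule.Recurrence != nil {
    return inRecurringWindow(m.Schedule.Recurrence, currentTime) // hypothetical helper
}
if !m.Schedule.StartTime.IsZero() && !m.Schedule.EndTime.IsZero() {
    return inFixedWindow(m.Schedule, currentTime) // hypothetical helper
}
return false
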
@@ -13,7 +13,6 @@ func timePtr(t time.Time) *time.Time {
}

func TestShouldSkipMaintenance(t *testing.T) {

    cases := []struct {
        name        string
        maintenance *PlannedMaintenance
@@ -499,7 +498,7 @@ func TestShouldSkipMaintenance(t *testing.T) {
                    },
                },
            },
            ts:   time.Date(2024, 04, 1, 12, 10, 0, 0, time.UTC),
            ts:   time.Date(2024, 4, 1, 12, 10, 0, 0, time.UTC),
            skip: true,
        },
        {
@@ -508,14 +507,14 @@ func TestShouldSkipMaintenance(t *testing.T) {
                Schedule: &Schedule{
                    Timezone: "UTC",
                    Recurrence: &Recurrence{
                        StartTime:  time.Date(2024, 04, 01, 12, 0, 0, 0, time.UTC),
                        StartTime:  time.Date(2024, 4, 1, 12, 0, 0, 0, time.UTC),
                        Duration:   valuer.MustParseTextDuration("2h"),
                        RepeatType: RepeatTypeWeekly,
                        RepeatOn:   []RepeatOn{RepeatOnMonday},
                    },
                },
            },
            ts:   time.Date(2024, 04, 15, 12, 10, 0, 0, time.UTC),
            ts:   time.Date(2024, 4, 15, 12, 10, 0, 0, time.UTC),
            skip: true,
        },
        {
@@ -524,14 +523,14 @@ func TestShouldSkipMaintenance(t *testing.T) {
                Schedule: &Schedule{
                    Timezone: "UTC",
                    Recurrence: &Recurrence{
                        StartTime:  time.Date(2024, 04, 01, 12, 0, 0, 0, time.UTC),
                        StartTime:  time.Date(2024, 4, 1, 12, 0, 0, 0, time.UTC),
                        Duration:   valuer.MustParseTextDuration("2h"),
                        RepeatType: RepeatTypeWeekly,
                        RepeatOn:   []RepeatOn{RepeatOnMonday},
                    },
                },
            },
            ts:   time.Date(2024, 04, 14, 12, 10, 0, 0, time.UTC), // 14th 04 is sunday
            ts:   time.Date(2024, 4, 14, 12, 10, 0, 0, time.UTC), // 14th 04 is sunday
            skip: false,
        },
        {
@@ -540,14 +539,14 @@ func TestShouldSkipMaintenance(t *testing.T) {
                Schedule: &Schedule{
                    Timezone: "UTC",
                    Recurrence: &Recurrence{
                        StartTime:  time.Date(2024, 04, 01, 12, 0, 0, 0, time.UTC),
                        StartTime:  time.Date(2024, 4, 1, 12, 0, 0, 0, time.UTC),
                        Duration:   valuer.MustParseTextDuration("2h"),
                        RepeatType: RepeatTypeWeekly,
                        RepeatOn:   []RepeatOn{RepeatOnMonday},
                    },
                },
            },
            ts:   time.Date(2024, 04, 16, 12, 10, 0, 0, time.UTC), // 16th 04 is tuesday
            ts:   time.Date(2024, 4, 16, 12, 10, 0, 0, time.UTC), // 16th 04 is tuesday
            skip: false,
        },
        {
@@ -556,14 +555,14 @@ func TestShouldSkipMaintenance(t *testing.T) {
                Schedule: &Schedule{
                    Timezone: "UTC",
                    Recurrence: &Recurrence{
                        StartTime:  time.Date(2024, 04, 01, 12, 0, 0, 0, time.UTC),
                        StartTime:  time.Date(2024, 4, 1, 12, 0, 0, 0, time.UTC),
                        Duration:   valuer.MustParseTextDuration("2h"),
                        RepeatType: RepeatTypeWeekly,
                        RepeatOn:   []RepeatOn{RepeatOnMonday},
                    },
                },
            },
            ts:   time.Date(2024, 05, 06, 12, 10, 0, 0, time.UTC),
            ts:   time.Date(2024, 5, 6, 12, 10, 0, 0, time.UTC),
            skip: true,
        },
        {
@@ -572,14 +571,14 @@ func TestShouldSkipMaintenance(t *testing.T) {
                Schedule: &Schedule{
                    Timezone: "UTC",
                    Recurrence: &Recurrence{
                        StartTime:  time.Date(2024, 04, 01, 12, 0, 0, 0, time.UTC),
                        StartTime:  time.Date(2024, 4, 1, 12, 0, 0, 0, time.UTC),
                        Duration:   valuer.MustParseTextDuration("2h"),
                        RepeatType: RepeatTypeWeekly,
                        RepeatOn:   []RepeatOn{RepeatOnMonday},
                    },
                },
            },
            ts:   time.Date(2024, 05, 06, 14, 00, 0, 0, time.UTC),
            ts:   time.Date(2024, 5, 6, 14, 0, 0, 0, time.UTC),
            skip: true,
        },
        {
@@ -588,13 +587,13 @@ func TestShouldSkipMaintenance(t *testing.T) {
                Schedule: &Schedule{
                    Timezone: "UTC",
                    Recurrence: &Recurrence{
                        StartTime:  time.Date(2024, 04, 04, 12, 0, 0, 0, time.UTC),
                        StartTime:  time.Date(2024, 4, 4, 12, 0, 0, 0, time.UTC),
                        Duration:   valuer.MustParseTextDuration("2h"),
                        RepeatType: RepeatTypeMonthly,
                    },
                },
            },
            ts:   time.Date(2024, 04, 04, 12, 10, 0, 0, time.UTC),
            ts:   time.Date(2024, 4, 4, 12, 10, 0, 0, time.UTC),
            skip: true,
        },
        {
@@ -603,13 +602,13 @@ func TestShouldSkipMaintenance(t *testing.T) {
                Schedule: &Schedule{
                    Timezone: "UTC",
                    Recurrence: &Recurrence{
                        StartTime:  time.Date(2024, 04, 04, 12, 0, 0, 0, time.UTC),
                        StartTime:  time.Date(2024, 4, 4, 12, 0, 0, 0, time.UTC),
                        Duration:   valuer.MustParseTextDuration("2h"),
                        RepeatType: RepeatTypeMonthly,
                    },
                },
            },
            ts:   time.Date(2024, 04, 04, 14, 10, 0, 0, time.UTC),
            ts:   time.Date(2024, 4, 4, 14, 10, 0, 0, time.UTC),
            skip: false,
        },
        {
@@ -618,13 +617,52 @@ func TestShouldSkipMaintenance(t *testing.T) {
                Schedule: &Schedule{
                    Timezone: "UTC",
                    Recurrence: &Recurrence{
                        StartTime:  time.Date(2024, 04, 04, 12, 0, 0, 0, time.UTC),
                        StartTime:  time.Date(2024, 4, 4, 12, 0, 0, 0, time.UTC),
                        Duration:   valuer.MustParseTextDuration("2h"),
                        RepeatType: RepeatTypeMonthly,
                    },
                },
            },
            ts:   time.Date(2024, 05, 04, 12, 10, 0, 0, time.UTC),
            ts:   time.Date(2024, 5, 4, 12, 10, 0, 0, time.UTC),
            skip: true,
        },
        // When a recurrence is set, it should govern, not the fixed range.
        {
            name: "recurring-daily-with-fixed-times-outside-daily-window",
            maintenance: &PlannedMaintenance{
                Schedule: &Schedule{
                    Timezone: "UTC",
                    // These fixed fields should be ignored when Recurrence is set.
                    StartTime: time.Date(2026, 4, 1, 10, 0, 0, 0, time.UTC),
                    EndTime:   time.Date(2026, 4, 30, 18, 0, 0, 0, time.UTC),
                    Recurrence: &Recurrence{
                        StartTime:  time.Date(2026, 4, 1, 14, 0, 0, 0, time.UTC), // daily at 14:00
                        Duration:   valuer.MustParseTextDuration("2h"),           // until 16:00
                        RepeatType: RepeatTypeDaily,
                    },
                },
            },
            // 11:00 is inside the fixed range but outside the daily 14:00-16:00 window.
            // Before the fix this returned true (bug); after the fix it returns false.
            ts:   time.Date(2026, 4, 15, 11, 0, 0, 0, time.UTC),
            skip: false,
        },
        {
            name: "recurring-daily-with-fixed-times-inside-daily-window",
            maintenance: &PlannedMaintenance{
                Schedule: &Schedule{
                    Timezone: "UTC",
                    StartTime: time.Date(2026, 4, 1, 10, 0, 0, 0, time.UTC),
                    EndTime:   time.Date(2026, 4, 30, 18, 0, 0, 0, time.UTC),
                    Recurrence: &Recurrence{
                        StartTime:  time.Date(2026, 4, 1, 14, 0, 0, 0, time.UTC),
                        Duration:   valuer.MustParseTextDuration("2h"),
                        RepeatType: RepeatTypeDaily,
                    },
                },
            },
            // 15:00 is inside the daily 14:00-16:00 window — should skip.
            ts:   time.Date(2026, 4, 15, 15, 0, 0, 0, time.UTC),
            skip: true,
        },
    }
