Compare commits

...

114 Commits

Author SHA1 Message Date
nikhilmantri0902
98eb002e07 chore: simplify 2026-04-24 18:14:48 +05:30
nikhilmantri0902
720379db9f chore: get onboarding spec 2026-04-24 17:47:03 +05:30
nikhilmantri0902
6ad14e7151 chore: renamed method 2026-04-24 17:38:56 +05:30
nikhilmantri0902
181fca064b chore: added onboarding api 2026-04-24 17:33:03 +05:30
nikhilmantri0902
a5e39ca6bd Merge branch 'infraM/v2_pods_list_api' into infraM/v2_onboarding_api 2026-04-24 17:25:27 +05:30
Ashwin Bhatkal
b35c6676f9 fix: rebase fixes 2026-04-24 13:17:02 +05:30
nikhilmantri0902
1095caa123 chore: improved api description to document -1 as no data in numeric fields 2026-04-24 12:11:45 +05:30
nikhilmantri0902
9043b49762 chore: removed pods - order by phase 2026-04-24 12:04:51 +05:30
nikhilmantri0902
d4084a7494 chore: added support for pod phase unknown 2026-04-24 11:46:26 +05:30
nikhilmantri0902
27c564b3bf chore: added required tags 2026-04-24 11:21:20 +05:30
Nikhil Mantri
f02c491828 Merge branch 'main' into infraM/v2_pods_list_api 2026-04-24 10:47:13 +05:30
Nikhil Mantri
3d53b8f77f Merge branch 'main' into infraM/v2_pods_list_api 2026-04-23 18:44:33 +05:30
nikhilmantri0902
dffe94fec4 chore: conflicts resolved 2026-04-23 18:39:39 +05:30
nikhilmantri0902
c9360fcf13 Merge branch 'infraM/v2_hosts_list_api' into infraM/v2_pods_list_api 2026-04-23 11:23:49 +05:30
nikhilmantri0902
b5ab45db20 chore: regen api client for inframonitoring
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-04-23 10:51:35 +05:30
Nikhil Mantri
08f76aca78 Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-23 09:51:01 +05:30
nikhilmantri0902
d81cec4c29 chore: added onboarding splits 2026-04-22 19:22:37 +05:30
nikhilmantri0902
49744c6104 chore: added attrs presence check function 2026-04-22 19:06:26 +05:30
nikhilmantri0902
2147627baf chore: added specs for all component types 2026-04-22 19:00:46 +05:30
nikhilmantri0902
824f92a88f chore: added types and constants 2026-04-22 18:21:55 +05:30
nikhilmantri0902
983d4fe4f2 Merge branch 'infraM/v2_hosts_list_api' into infraM/v2_pods_list_api 2026-04-22 15:37:21 +05:30
nikhilmantri0902
833af794c3 chore: make sort stable in case of tiebreaker by comparing composite group by keys 2026-04-22 15:26:28 +05:30
nikhilmantri0902
21b51d1fcc chore: cleanup and rename 2026-04-22 15:13:00 +05:30
nikhilmantri0902
56f22682c8 Merge branch 'infraM/v2_hosts_list_api' into infraM/v2_pods_list_api 2026-04-22 14:29:17 +05:30
nikhilmantri0902
9c8359940c chore: remove a defensive nil map check, the function ensure non-nil map when err nil 2026-04-22 11:59:01 +05:30
Nikhil Mantri
4050880275 Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-22 11:35:57 +05:30
nikhilmantri0902
5e775f64f2 chore: added status unauthorized 2026-04-21 21:30:44 +05:30
nikhilmantri0902
0189f23f46 chore: removed internal server error 2026-04-21 21:30:01 +05:30
nikhilmantri0902
49a36d4e3d chore: removed pod metric temporalities 2026-04-21 21:24:49 +05:30
nikhilmantri0902
9407d658ab chore: merge base hosts v2 branch 2026-04-21 21:17:28 +05:30
nikhilmantri0902
5035712485 chore: added json tag required: true 2026-04-21 18:50:25 +05:30
nikhilmantri0902
bab17c3615 chore: comments resolve 2026-04-21 18:33:56 +05:30
Nikhil Mantri
37b44f4db9 Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-21 17:40:06 +05:30
nikhilmantri0902
99dd6e5f1e chore: pods code restructuring 2026-04-21 17:03:13 +05:30
nikhilmantri0902
9c7131fa6a chore: merge base branch 2026-04-21 16:22:55 +05:30
Nikhil Mantri
ad889a2e1d Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-21 13:48:53 +05:30
nikhilmantri0902
a4f6d0cbf5 chore: removed temporalities 2026-04-21 13:44:06 +05:30
nikhilmantri0902
589bed7c16 chore: comments correction 2026-04-21 12:50:51 +05:30
nikhilmantri0902
93843a1f48 chore: file structure further breakdown for clarity 2026-04-21 12:36:07 +05:30
nikhilmantri0902
88c43108fc chore: added types package 2026-04-20 18:52:43 +05:30
nikhilmantri0902
ed4cf540e8 chore: inframonitoring types renaming 2026-04-20 18:47:28 +05:30
nikhilmantri0902
9e2dfa9033 chore: rearrangement 2026-04-20 17:51:03 +05:30
nikhilmantri0902
d98d5d68ee chore: rename PodsList -> ListPods 2026-04-20 16:57:21 +05:30
nikhilmantri0902
2cb1c3b73b chore: rename HostsList -> ListHosts 2026-04-20 16:42:19 +05:30
nikhilmantri0902
ae7ca497ad chore: merged base hosts branch and reorganized code 2026-04-20 13:38:25 +05:30
Nikhil Mantri
a579916961 Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-20 11:05:36 +05:30
Nikhil Mantri
4a16d56abf feat(infra-monitoring): v2 hosts list - return counts of active & inactive hosts for custom group by attributes (#10956)
* chore: add functionality for showing active and inactive counts in custom group by

* chore: bug fix

* chore: added subquery for active and total count

* chore: ignore empty string hosts in get active hosts

* fix: sinceUnixMilli for determining active hosts compute once per request

* chore: refactor code
2026-04-20 10:41:15 +05:30
Nikhil Mantri
642b5ac3f0 Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-16 16:32:39 +05:30
Nikhil Mantri
a12112619c Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-16 15:41:35 +05:30
nikhilmantri0902
014785f1bc chore: ignore empty string hosts in get active hosts 2026-04-16 13:17:15 +05:30
Nikhil Mantri
58ee797b10 Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-15 14:18:29 +05:30
Nikhil Mantri
82d236742f Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-15 11:21:33 +05:30
nikhilmantri0902
397e1ad5be chore: added TODOs and made filterByStatus a part of filter struct 2026-04-14 18:32:48 +05:30
nikhilmantri0902
8d6b25ca9b chore: resolved conflicts 2026-04-14 17:09:17 +05:30
nikhilmantri0902
5fa6bd8b8d Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-13 11:02:14 +05:30
nikhilmantri0902
bd9977483b chore: improved description 2026-04-11 11:31:35 +05:30
nikhilmantri0902
50fbdfeeef chore: validate order by to validate function 2026-04-10 19:01:45 +05:30
nikhilmantri0902
e2b1b73e87 chore: improvements 2026-04-10 13:23:33 +05:30
nikhilmantri0902
cb9f3fd3e5 chore: rearrage 2026-04-10 00:39:23 +05:30
nikhilmantri0902
232acc343d chore: escape backtick to prevent sql injection 2026-04-10 00:01:01 +05:30
nikhilmantri0902
2025afdccc chore: endpoint modification openapi 2026-04-09 23:25:59 +05:30
nikhilmantri0902
d2f4d4af93 chore: endpoint correction 2026-04-09 23:21:57 +05:30
Nikhil Mantri
47ff7bbb8e Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-09 23:20:39 +05:30
Nikhil Mantri
724071c5dc Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-09 18:30:15 +05:30
nikhilmantri0902
4d24979358 chore: frontend fix 2026-04-09 18:26:42 +05:30
nikhilmantri0902
042943b10a chore: distributed samples table to local table change for get metadata 2026-04-09 18:24:45 +05:30
nikhilmantri0902
48a9be7ec8 chore: added required metrics check 2026-04-09 17:38:48 +05:30
nikhilmantri0902
a9504b2120 chore: added a TODO remark 2026-04-09 16:08:34 +05:30
nikhilmantri0902
8755887c4a chore: added better metrics existence check 2026-04-09 16:01:35 +05:30
Nikhil Mantri
4cb4662b3a Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-09 15:14:25 +05:30
nikhilmantri0902
e6900dabc8 chore: warnings added passing from queryResponse warning to host lists response struct 2026-04-09 00:09:38 +05:30
nikhilmantri0902
c1ba389b63 chore: add type for response and files rearrange 2026-04-08 23:35:53 +05:30
nikhilmantri0902
3a1f40234f Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-08 23:03:50 +05:30
Nikhil Mantri
2e4891fa63 Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-08 16:07:57 +05:30
Nikhil Mantri
04ebc0bec7 Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-08 11:08:10 +05:30
nikhilmantri0902
271f9b81ed Merge branch 'infraM/v2_hosts_list_api' into infraM/v2_pods_list_api 2026-04-07 21:55:47 +05:30
nikhilmantri0902
6fa815c294 chore: modified getMetadata query 2026-04-07 18:55:57 +05:30
nikhilmantri0902
63ec518efb chore: added hostName logic 2026-04-07 17:36:15 +05:30
nikhilmantri0902
c4ca20dd90 chore: return errors from getMetadata and lint fix 2026-04-07 17:01:13 +05:30
nikhilmantri0902
e56cc4222b chore: return errors from getMetadata and lint fix 2026-04-07 16:57:35 +05:30
nikhilmantri0902
07d2944d7c chore: yarn generate api 2026-04-07 16:44:06 +05:30
nikhilmantri0902
dea01ae36a chore: hostStatusNone added for clarity that this field can be left empty as well in payload 2026-04-07 16:32:25 +05:30
nikhilmantri0902
62ea5b54e2 Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-07 14:09:48 +05:30
nikhilmantri0902
e549a7e42f chore: added pods list api updates 2026-04-07 13:58:10 +05:30
nikhilmantri0902
90e2ebb11f Merge branch 'infraM/v2_hosts_list_api' into infraM/v2_pods_list_api 2026-04-07 13:51:35 +05:30
nikhilmantri0902
61baa1be7a chore: code improvements 2026-04-07 13:49:00 +05:30
nikhilmantri0902
b946fa665f Merge branch 'infraM/v2_hosts_list_api' into infraM/v2_pods_list_api 2026-04-07 11:15:35 +05:30
nikhilmantri0902
2e049556e4 chore: unified composite key function 2026-04-07 11:15:03 +05:30
nikhilmantri0902
492a5e70d7 chore: added pods metrics temporality 2026-04-06 17:33:44 +05:30
nikhilmantri0902
ba1f2771e8 Merge branch 'infraM/v2_hosts_list_api' into infraM/v2_pods_list_api 2026-04-06 17:18:44 +05:30
nikhilmantri0902
7458fb4855 Merge branch 'main' into infraM/v2_hosts_list_api 2026-04-06 17:18:01 +05:30
nikhilmantri0902
5f55f3938b chore: added temporalities of metrics 2026-04-06 17:17:15 +05:30
nikhilmantri0902
3e8102485c Merge branch 'infraM/v2_hosts_list_api' into infraM/v2_pods_list_api 2026-04-04 20:52:50 +05:30
nikhilmantri0902
861c682ea5 chore: nil pointer dereference fix in req.Filter 2026-04-04 20:52:08 +05:30
nikhilmantri0902
c8e5895dff chore: nil pointer check 2026-04-04 20:45:04 +05:30
nikhilmantri0902
82d72e7edb chore: pods api meta start time 2026-04-04 17:18:04 +05:30
nikhilmantri0902
a3f8ecaaf1 chore: merged base branch 2026-04-04 16:47:10 +05:30
nikhilmantri0902
19aada656c chore: updated spec 2026-04-04 16:44:15 +05:30
nikhilmantri0902
b21bb4280f chore: updated openapi yml 2026-04-04 16:38:22 +05:30
nikhilmantri0902
bc0a4fdb5c chore: added pods list logic 2026-04-04 13:24:46 +05:30
nikhilmantri0902
37fb0e9254 Merge branch 'infraM/base_dependencies' into infraM/v2_hosts_list_api 2026-04-03 17:49:00 +05:30
nikhilmantri0902
aecfa1a174 chore: added validation on order by 2026-04-02 20:13:30 +05:30
nikhilmantri0902
b869d23d94 chore: moved funcs 2026-04-02 20:02:22 +05:30
nikhilmantri0902
6ee3d44f76 chore: removed isSendingK8sAgentsMetricsCode 2026-04-02 19:58:30 +05:30
nikhilmantri0902
462e554107 chore: yarn generate api 2026-04-02 14:49:15 +05:30
nikhilmantri0902
66afa73e6f chore: return status as a string 2026-04-02 14:39:02 +05:30
nikhilmantri0902
54c604bcf4 chore: added some unit tests 2026-04-02 14:20:27 +05:30
nikhilmantri0902
c1be02ba54 chore: added validate function 2026-04-02 14:14:34 +05:30
nikhilmantri0902
d3c7ba8f45 chore: disk usage 2026-04-02 14:01:18 +05:30
nikhilmantri0902
039c4a0496 fix: bug fix 2026-04-02 11:32:49 +05:30
nikhilmantri0902
51a94b6bbc chore: added logic for hosts v3 api 2026-04-02 02:52:28 +05:30
nikhilmantri0902
bbfbb94f52 chore: merged main 2026-04-01 00:45:40 +05:30
nikhilmantri0902
d1eb9ef16f chore: endpoint detail update 2026-03-31 16:16:31 +05:30
nikhilmantri0902
3db00f8bc3 chore: baseline setup 2026-03-31 15:27:18 +05:30
30 changed files with 2510 additions and 69 deletions

View File

@@ -2467,6 +2467,82 @@ components:
- requiredMetricsCheck
- endTimeBeforeRetention
type: object
InframonitoringtypesPodPhase:
enum:
- pending
- running
- succeeded
- failed
- unknown
- ""
type: string
InframonitoringtypesPodRecord:
properties:
meta:
additionalProperties: {}
nullable: true
type: object
podAge:
format: int64
type: integer
podCPU:
format: double
type: number
podCPULimit:
format: double
type: number
podCPURequest:
format: double
type: number
podMemory:
format: double
type: number
podMemoryLimit:
format: double
type: number
podMemoryRequest:
format: double
type: number
podPhase:
$ref: '#/components/schemas/InframonitoringtypesPodPhase'
podUID:
type: string
required:
- podUID
- podCPU
- podCPURequest
- podCPULimit
- podMemory
- podMemoryRequest
- podMemoryLimit
- podPhase
- podAge
- meta
type: object
InframonitoringtypesPods:
properties:
endTimeBeforeRetention:
type: boolean
records:
items:
$ref: '#/components/schemas/InframonitoringtypesPodRecord'
nullable: true
type: array
requiredMetricsCheck:
$ref: '#/components/schemas/InframonitoringtypesRequiredMetricsCheck'
total:
type: integer
type:
$ref: '#/components/schemas/InframonitoringtypesResponseType'
warning:
$ref: '#/components/schemas/Querybuildertypesv5QueryWarnData'
required:
- type
- records
- total
- requiredMetricsCheck
- endTimeBeforeRetention
type: object
InframonitoringtypesPostableHosts:
properties:
end:
@@ -2493,6 +2569,32 @@ components:
- end
- limit
type: object
InframonitoringtypesPostablePods:
properties:
end:
format: int64
type: integer
filter:
$ref: '#/components/schemas/Querybuildertypesv5Filter'
groupBy:
items:
$ref: '#/components/schemas/Querybuildertypesv5GroupByKey'
nullable: true
type: array
limit:
type: integer
offset:
type: integer
orderBy:
$ref: '#/components/schemas/Querybuildertypesv5OrderBy'
start:
format: int64
type: integer
required:
- start
- end
- limit
type: object
InframonitoringtypesRequiredMetricsCheck:
properties:
missingMetrics:
@@ -10220,7 +10322,9 @@ paths:
five metrics, and pagination via offset/limit. The response type is ''list''
for the default host.name grouping or ''grouped_list'' for custom groupBy
keys. Also reports missing required metrics and whether the requested time
range falls before the data retention boundary.'
range falls before the data retention boundary. Numeric metric fields (cpu,
memory, wait, load15, diskUsage) return -1 as a sentinel when no data is available
for that field; frontends should render ''—'' rather than the literal value.'
operationId: ListHosts
requestBody:
content:
@@ -10274,6 +10378,76 @@ paths:
summary: List Hosts for Infra Monitoring
tags:
- inframonitoring
/api/v2/infra_monitoring/pods:
post:
deprecated: false
description: 'Returns a paginated list of Kubernetes pods with key metrics:
CPU usage, CPU request/limit utilization, memory working set, memory request/limit
utilization, current pod phase (pending/running/succeeded/failed/unknown),
and pod age (ms since start time). Each pod includes metadata attributes (namespace,
node, workload owner such as deployment/statefulset/daemonset/job/cronjob,
cluster). Supports filtering via a filter expression, custom groupBy to aggregate
pods by any attribute, ordering by any of the six metrics (cpu, cpu_request,
cpu_limit, memory, memory_request, memory_limit), and pagination via offset/limit.
The response type is ''list'' for the default k8s.pod.uid grouping or ''grouped_list''
for custom groupBy keys. Also reports missing required metrics and whether
the requested time range falls before the data retention boundary. Numeric
metric fields (podCPU, podCPURequest, podCPULimit, podMemory, podMemoryRequest,
podMemoryLimit, podAge) return -1 as a sentinel when no data is available
for that field; frontends should render ''—'' rather than the literal value.'
operationId: ListPods
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/InframonitoringtypesPostablePods'
responses:
"200":
content:
application/json:
schema:
properties:
data:
$ref: '#/components/schemas/InframonitoringtypesPods'
status:
type: string
required:
- status
- data
type: object
description: OK
"400":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Bad Request
"401":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Unauthorized
"403":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Forbidden
"500":
content:
application/json:
schema:
$ref: '#/components/schemas/RenderErrorResponse'
description: Internal Server Error
security:
- api_key:
- VIEWER
- tokenizer:
- VIEWER
summary: List Pods for Infra Monitoring
tags:
- inframonitoring
/api/v2/livez:
get:
deprecated: false

View File

@@ -13,7 +13,9 @@ import type {
import type {
InframonitoringtypesPostableHostsDTO,
InframonitoringtypesPostablePodsDTO,
ListHosts200,
ListPods200,
RenderErrorResponseDTO,
} from '../sigNoz.schemas';
@@ -21,7 +23,7 @@ import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type { ErrorType, BodyType } from '../../../generatedAPIInstance';
/**
* Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary.
* Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (cpu, memory, wait, load15, diskUsage) return -1 as a sentinel when no data is available for that field; frontends should render '—' rather than the literal value.
* @summary List Hosts for Infra Monitoring
*/
export const listHosts = (
@@ -104,3 +106,87 @@ export const useListHosts = <
return useMutation(mutationOptions);
};
/**
 * Returns a paginated list of Kubernetes pods with key metrics: CPU usage, CPU request/limit utilization, memory working set, memory request/limit utilization, current pod phase (pending/running/succeeded/failed/unknown), and pod age (ms since start time). Each pod includes metadata attributes (namespace, node, workload owner such as deployment/statefulset/daemonset/job/cronjob, cluster). Supports filtering via a filter expression, custom groupBy to aggregate pods by any attribute, ordering by any of the six metrics (cpu, cpu_request, cpu_limit, memory, memory_request, memory_limit), and pagination via offset/limit. The response type is 'list' for the default k8s.pod.uid grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (podCPU, podCPURequest, podCPULimit, podMemory, podMemoryRequest, podMemoryLimit, podAge) return -1 as a sentinel when no data is available for that field; frontends should render '—' rather than the literal value.
 * @summary List Pods for Infra Monitoring
 */
export const listPods = (
  inframonitoringtypesPostablePodsDTO: BodyType<InframonitoringtypesPostablePodsDTO>,
  signal?: AbortSignal,
) =>
  // POST the request body as JSON and resolve with the typed 200 envelope.
  GeneratedAPIInstance<ListPods200>({
    url: `/api/v2/infra_monitoring/pods`,
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    data: inframonitoringtypesPostablePodsDTO,
    signal,
  });
/**
 * Builds react-query mutation options for listPods.
 * A caller-supplied mutationKey is preserved; otherwise the default
 * ['listPods'] key is injected. The caller's mutation options are spread
 * last so they can override the generated mutationFn.
 */
export const getListPodsMutationOptions = <
  TError = ErrorType<RenderErrorResponseDTO>,
  TContext = unknown,
>(options?: {
  mutation?: UseMutationOptions<
    Awaited<ReturnType<typeof listPods>>,
    TError,
    { data: BodyType<InframonitoringtypesPostablePodsDTO> },
    TContext
  >;
}): UseMutationOptions<
  Awaited<ReturnType<typeof listPods>>,
  TError,
  { data: BodyType<InframonitoringtypesPostablePodsDTO> },
  TContext
> => {
  const mutationKey = ['listPods'];

  // Detect an explicitly provided key on the incoming mutation options.
  const hasExplicitKey =
    options?.mutation &&
    'mutationKey' in options.mutation &&
    options.mutation.mutationKey;

  const { mutation: mutationOptions } = options
    ? hasExplicitKey
      ? options
      : { ...options, mutation: { ...options.mutation, mutationKey } }
    : { mutation: { mutationKey } };

  // Unwrap the { data } variables object and forward the body to listPods.
  const mutationFn: MutationFunction<
    Awaited<ReturnType<typeof listPods>>,
    { data: BodyType<InframonitoringtypesPostablePodsDTO> }
  > = (variables) => {
    const { data } = variables ?? {};
    return listPods(data);
  };

  return { mutationFn, ...mutationOptions };
};
// Resolved (non-null) response type of a successful listPods call.
export type ListPodsMutationResult = NonNullable<
  Awaited<ReturnType<typeof listPods>>
>;
// Request body type accepted by listPods.
export type ListPodsMutationBody =
  BodyType<InframonitoringtypesPostablePodsDTO>;
// Error payload type returned when listPods fails.
export type ListPodsMutationError = ErrorType<RenderErrorResponseDTO>;
/**
 * React hook wrapping listPods in a react-query mutation.
 * @summary List Pods for Infra Monitoring
 */
export const useListPods = <
  TError = ErrorType<RenderErrorResponseDTO>,
  TContext = unknown,
>(options?: {
  mutation?: UseMutationOptions<
    Awaited<ReturnType<typeof listPods>>,
    TError,
    { data: BodyType<InframonitoringtypesPostablePodsDTO> },
    TContext
  >;
}): UseMutationResult<
  Awaited<ReturnType<typeof listPods>>,
  TError,
  { data: BodyType<InframonitoringtypesPostablePodsDTO> },
  TContext
  // Delegate option assembly to the shared builder and hand off to react-query.
> => useMutation(getListPodsMutationOptions(options));

View File

@@ -3238,6 +3238,88 @@ export interface InframonitoringtypesHostsDTO {
warning?: Querybuildertypesv5QueryWarnDataDTO;
}
// Kubernetes pod lifecycle phase; the empty string covers records where
// no phase value was reported.
export enum InframonitoringtypesPodPhaseDTO {
  pending = 'pending',
  running = 'running',
  succeeded = 'succeeded',
  failed = 'failed',
  unknown = 'unknown',
  '' = '',
}
/**
 * Arbitrary metadata attributes attached to a pod record
 * (e.g. namespace, node, workload owner, cluster).
 * @nullable
 */
export type InframonitoringtypesPodRecordDTOMeta = {
  [key: string]: unknown;
} | null;
/**
 * A single pod entry in the ListPods response. Numeric metric fields
 * (podAge, podCPU, podCPULimit, podCPURequest, podMemory,
 * podMemoryLimit, podMemoryRequest) return -1 as a sentinel when no
 * data is available; frontends should render '—' rather than -1.
 */
export interface InframonitoringtypesPodRecordDTO {
  /**
   * Metadata attributes (namespace, node, workload owner, cluster).
   * @type object
   * @nullable true
   */
  meta: InframonitoringtypesPodRecordDTOMeta;
  /**
   * Pod age in ms since start time; -1 when unavailable.
   * @type integer
   * @format int64
   */
  podAge: number;
  /**
   * CPU usage; -1 when unavailable.
   * @type number
   * @format double
   */
  podCPU: number;
  /**
   * CPU limit utilization; -1 when unavailable.
   * @type number
   * @format double
   */
  podCPULimit: number;
  /**
   * CPU request utilization; -1 when unavailable.
   * @type number
   * @format double
   */
  podCPURequest: number;
  /**
   * Memory working set; -1 when unavailable.
   * @type number
   * @format double
   */
  podMemory: number;
  /**
   * Memory limit utilization; -1 when unavailable.
   * @type number
   * @format double
   */
  podMemoryLimit: number;
  /**
   * Memory request utilization; -1 when unavailable.
   * @type number
   * @format double
   */
  podMemoryRequest: number;
  // Current pod phase (pending/running/succeeded/failed/unknown, or '' when absent).
  podPhase: InframonitoringtypesPodPhaseDTO;
  /**
   * Unique pod identifier (k8s.pod.uid).
   * @type string
   */
  podUID: string;
}
/**
 * Payload of a successful ListPods (200) response.
 */
export interface InframonitoringtypesPodsDTO {
  /**
   * True when the requested time range falls before the data retention boundary.
   * @type boolean
   */
  endTimeBeforeRetention: boolean;
  /**
   * Pod records for the current page.
   * @type array
   * @nullable true
   */
  records: InframonitoringtypesPodRecordDTO[] | null;
  // Reports any required metrics that are missing.
  requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
  /**
   * Total count of matching pods — presumably across all pages; confirm against the API.
   * @type integer
   */
  total: number;
  // 'list' for the default k8s.pod.uid grouping or 'grouped_list' for custom groupBy keys.
  type: InframonitoringtypesResponseTypeDTO;
  // Optional warning data propagated from the underlying query response.
  warning?: Querybuildertypesv5QueryWarnDataDTO;
}
export interface InframonitoringtypesPostableHostsDTO {
/**
* @type integer
@@ -3266,6 +3348,34 @@ export interface InframonitoringtypesPostableHostsDTO {
start: number;
}
/**
 * Request body for ListPods (POST /api/v2/infra_monitoring/pods).
 */
export interface InframonitoringtypesPostablePodsDTO {
  /**
   * End of the queried time range (int64; presumably epoch millis — confirm against the API).
   * @type integer
   * @format int64
   */
  end: number;
  // Optional filter expression to narrow down pods.
  filter?: Querybuildertypesv5FilterDTO;
  /**
   * Optional groupBy keys to aggregate pods by any attribute.
   * @type array
   * @nullable true
   */
  groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
  /**
   * Page size for pagination.
   * @type integer
   */
  limit: number;
  /**
   * Pagination offset.
   * @type integer
   */
  offset?: number;
  // Optional ordering by one of the pod metrics.
  orderBy?: Querybuildertypesv5OrderByDTO;
  /**
   * Start of the queried time range (int64; presumably epoch millis — confirm against the API).
   * @type integer
   * @format int64
   */
  start: number;
}
export interface InframonitoringtypesRequiredMetricsCheckDTO {
/**
* @type array
@@ -7102,6 +7212,14 @@ export type ListHosts200 = {
status: string;
};
// Envelope of a 200 response from ListPods: a status string plus the pods payload.
export type ListPods200 = {
  data: InframonitoringtypesPodsDTO;
  /**
   * @type string
   */
  status: string;
};
export type Livez200 = {
data: FactoryResponseDTO;
/**

View File

@@ -123,10 +123,11 @@ function ChartPreview({
});
const { currentQuery } = useQueryBuilder();
const { minTime, maxTime, selectedTime: globalSelectedInterval } = useSelector<
AppState,
GlobalReducer
>((state) => state.globalTime);
const {
minTime,
maxTime,
selectedTime: globalSelectedInterval,
} = useSelector<AppState, GlobalReducer>((state) => state.globalTime);
const { featureFlags } = useAppContext();
@@ -138,8 +139,8 @@ function ChartPreview({
if (startTime && endTime && startTime !== endTime) {
dispatch(
UpdateTimeInterval('custom', [
parseInt(getTimeString(startTime), 10),
parseInt(getTimeString(endTime), 10),
Number.parseInt(getTimeString(startTime), 10),
Number.parseInt(getTimeString(endTime), 10),
]),
);
}
@@ -221,12 +222,11 @@ function ChartPreview({
// Initialize graph visibility from localStorage
useEffect(() => {
if (queryResponse?.data?.payload?.data?.result) {
const {
graphVisibilityStates: localStoredVisibilityState,
} = getLocalStorageGraphVisibilityState({
apiResponse: queryResponse.data.payload.data.result,
name: 'alert-chart-preview',
});
const { graphVisibilityStates: localStoredVisibilityState } =
getLocalStorageGraphVisibilityState({
apiResponse: queryResponse.data.payload.data.result,
name: 'alert-chart-preview',
});
setGraphVisibility(localStoredVisibilityState);
}
}, [queryResponse?.data?.payload?.data?.result]);

View File

@@ -127,9 +127,8 @@ function FormAlertRules({
handleSetConfig,
redirectWithQueryBuilderData,
} = useQueryBuilder();
const { matchType, op, target, targetUnit } = usePrefillAlertConditions(
stagedQuery,
);
const { matchType, op, target, targetUnit } =
usePrefillAlertConditions(stagedQuery);
useEffect(() => {
handleSetConfig(panelType || PANEL_TYPES.TIME_SERIES, dataSource);
@@ -172,9 +171,10 @@ function FormAlertRules({
}, [currentQuery.unit]);
// initQuery contains initial query when component was mounted
const initQuery = useMemo(() => initialValue.condition.compositeQuery, [
initialValue,
]);
const initQuery = useMemo(
() => initialValue.condition.compositeQuery,
[initialValue],
);
const queryOptions = useMemo(() => {
const involvedQueriesInTraceOperator = getInvolvedQueriesInTraceOperator(
@@ -370,7 +370,7 @@ function FormAlertRules({
// onQueryCategoryChange handles changes to query category
// in state as well as sets additional defaults
const onQueryCategoryChange = (val: EQueryType): void => {
const element = document.getElementById('top');
const element = document.querySelector('#top');
if (element) {
element.scrollIntoView({ behavior: 'smooth' });
}
@@ -808,9 +808,10 @@ function FormAlertRules({
featureFlags?.find((flag) => flag.name === FeatureKeys.ANOMALY_DETECTION)
?.active || false;
const source = useMemo(() => urlQuery.get(QueryParams.source) as YAxisSource, [
urlQuery,
]);
const source = useMemo(
() => urlQuery.get(QueryParams.source) as YAxisSource,
[urlQuery],
);
// Only update automatically when creating a new metrics-based alert rule
const shouldUpdateYAxisUnit = useMemo(() => {

View File

@@ -108,9 +108,8 @@ function Explorer(): JSX.Element {
const [showOneChartPerQuery, toggleShowOneChartPerQuery] = useState(
isOneChartPerQueryEnabled,
);
const [disableOneChartPerQuery, toggleDisableOneChartPerQuery] = useState(
false,
);
const [disableOneChartPerQuery, toggleDisableOneChartPerQuery] =
useState(false);
const [yAxisUnit, setYAxisUnit] = useState<string | undefined>();
const unitsLength = useMemo(() => units.length, [units]);
@@ -280,7 +279,7 @@ function Explorer(): JSX.Element {
[],
);
const [warning, setWarning] = useState<Warning | undefined>(undefined);
const [warning, setWarning] = useState<Warning | undefined>();
const oneChartPerQueryDisabledTooltip = useMemo(() => {
if (splitedQueries.length <= 1) {
@@ -292,7 +291,7 @@ function Explorer(): JSX.Element {
if (disableOneChartPerQuery) {
return 'One chart per query cannot be disabled for multiple queries with different units.';
}
return undefined;
return;
}, [disableOneChartPerQuery, splitedQueries.length, units.length]);
// Show the y axis unit selector if -

View File

@@ -35,20 +35,14 @@ function Inspect({
onClose,
}: InspectProps): JSX.Element {
const isDarkMode = useIsDarkMode();
const [currentMetricName, setCurrentMetricName] = useState<string>(
defaultMetricName,
);
const [appliedMetricName, setAppliedMetricName] = useState<string>(
defaultMetricName,
);
const [
popoverOptions,
setPopoverOptions,
] = useState<GraphPopoverOptions | null>(null);
const [
expandedViewOptions,
setExpandedViewOptions,
] = useState<GraphPopoverOptions | null>(null);
const [currentMetricName, setCurrentMetricName] =
useState<string>(defaultMetricName);
const [appliedMetricName, setAppliedMetricName] =
useState<string>(defaultMetricName);
const [popoverOptions, setPopoverOptions] =
useState<GraphPopoverOptions | null>(null);
const [expandedViewOptions, setExpandedViewOptions] =
useState<GraphPopoverOptions | null>(null);
const [showExpandedView, setShowExpandedView] = useState(false);
const { data: metricDetailsData } = useGetMetricMetadata(
@@ -144,13 +138,15 @@ function Inspect({
[dispatchMetricInspectionOptions],
);
const selectedMetricType = useMemo(() => metricDetailsData?.data?.type, [
metricDetailsData,
]);
const selectedMetricType = useMemo(
() => metricDetailsData?.data?.type,
[metricDetailsData],
);
const selectedMetricUnit = useMemo(() => metricDetailsData?.data?.unit, [
metricDetailsData,
]);
const selectedMetricUnit = useMemo(
() => metricDetailsData?.data?.unit,
[metricDetailsData],
);
const aggregateAttribute = useMemo(
() => ({
@@ -221,7 +217,7 @@ function Inspect({
);
}
if (!inspectMetricsTimeSeries.length) {
if (inspectMetricsTimeSeries.length === 0) {
return renderFallback(
'inspect-metrics-empty',
<Empty description="No time series found for this metric to inspect." />,

View File

@@ -256,10 +256,10 @@ export function useInspectMetrics(
const valuesMap = new Map<number, number>();
series.values.forEach(({ timestamp, value }) => {
valuesMap.set(timestamp, parseFloat(value));
valuesMap.set(timestamp, Number.parseFloat(value));
});
return timestamps.map((timestamp) => valuesMap.get(timestamp) ?? NaN);
return timestamps.map((timestamp) => valuesMap.get(timestamp) ?? Number.NaN);
});
const rawData = [timestamps, ...timeseriesArray];
@@ -273,7 +273,7 @@ export function useInspectMetrics(
labels.add(label);
});
});
return Array.from(labels);
return [...labels];
}, [inspectMetricsData]);
const reset = useCallback(() => {

View File

@@ -18,7 +18,7 @@ describe('RunQueryBtn', () => {
);
});
test('renders run state and triggers on click', async () => {
it('renders run state and triggers on click', async () => {
const user = userEvent.setup();
const onRun = jest.fn();
const onCancel = jest.fn();
@@ -35,7 +35,7 @@ describe('RunQueryBtn', () => {
expect(onRun).toHaveBeenCalledTimes(1);
});
test('shows cancel state and calls handleCancelQuery', async () => {
it('shows cancel state and calls handleCancelQuery', async () => {
const user = userEvent.setup();
const onRun = jest.fn();
const onCancel = jest.fn();
@@ -51,19 +51,19 @@ describe('RunQueryBtn', () => {
expect(onCancel).toHaveBeenCalledTimes(1);
});
test('disabled when disabled prop is true', () => {
it('disabled when disabled prop is true', () => {
render(<RunQueryBtn disabled />);
expect(screen.getByRole('button', { name: /run query/i })).toBeDisabled();
});
test('disabled when no props provided', () => {
it('disabled when no props provided', () => {
render(<RunQueryBtn />);
expect(
screen.getByRole('button', { name: /run query/i }),
).toBeInTheDocument();
});
test('shows Command + CornerDownLeft on mac', () => {
it('shows Command + CornerDownLeft on mac', () => {
const { container } = render(
<RunQueryBtn
onStageRunQuery={jest.fn()}
@@ -77,7 +77,7 @@ describe('RunQueryBtn', () => {
).toBeInTheDocument();
});
test('shows ChevronUp + CornerDownLeft on non-mac', () => {
it('shows ChevronUp + CornerDownLeft on non-mac', () => {
(getUserOperatingSystem as jest.Mock).mockReturnValue(
UserOperatingSystem.WINDOWS,
);
@@ -95,7 +95,7 @@ describe('RunQueryBtn', () => {
).toBeInTheDocument();
});
test('renders custom label when provided', () => {
it('renders custom label when provided', () => {
render(
<RunQueryBtn
onStageRunQuery={jest.fn()}

View File

@@ -115,8 +115,8 @@ export const useGetQueryRange: UseGetQueryRange = (
const updatedQuery = updateBarStepInterval(
requestData.query,
requestData.start ? requestData.start * 1e3 : parseInt(start, 10) * 1e3,
requestData.end ? requestData.end * 1e3 : parseInt(end, 10) * 1e3,
requestData.start ? requestData.start * 1e3 : Number.parseInt(start, 10) * 1e3,
requestData.end ? requestData.end * 1e3 : Number.parseInt(end, 10) * 1e3,
);
return {

View File

@@ -98,7 +98,7 @@ function LogsExplorer(): JSX.Element {
setIsLoadingQueries(false);
}, [queryClient]);
const [warning, setWarning] = useState<Warning | undefined>(undefined);
const [warning, setWarning] = useState<Warning | undefined>();
const handleChangeSelectedView = useCallback(
(view: ExplorerViews, querySearchParameters?: ICurrentQueryData): void => {
@@ -127,9 +127,8 @@ function LogsExplorer(): JSX.Element {
setShowFilters((prev) => !prev);
};
const {
redirectWithQuery: redirectWithOptionsData,
} = useUrlQueryData<OptionsQuery>(URL_OPTIONS, defaultOptionsQuery);
const { redirectWithQuery: redirectWithOptionsData } =
useUrlQueryData<OptionsQuery>(URL_OPTIONS, defaultOptionsQuery);
// Get and parse stored columns from localStorage
const logListOptionsFromLocalStorage = useMemo(() => {

View File

@@ -101,7 +101,7 @@ function TracesExplorer(): JSX.Element {
getExplorerViewFromUrl(searchParams, panelTypesFromUrl),
);
const [warning, setWarning] = useState<Warning | undefined>(undefined);
const [warning, setWarning] = useState<Warning | undefined>();
const [isOpen, setOpen] = useState<boolean>(true);
const defaultQuery = useMemo(

View File

@@ -16,7 +16,7 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
ID: "ListHosts",
Tags: []string{"inframonitoring"},
Summary: "List Hosts for Infra Monitoring",
Description: "Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary.",
Description: "Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (cpu, memory, wait, load15, diskUsage) return -1 as a sentinel when no data is available for that field; frontends should render '—' rather than the literal value.",
Request: new(inframonitoringtypes.PostableHosts),
RequestContentType: "application/json",
Response: new(inframonitoringtypes.Hosts),
@@ -29,5 +29,42 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
return err
}
if err := router.Handle("/api/v2/infra_monitoring/pods", handler.New(
provider.authZ.ViewAccess(provider.infraMonitoringHandler.ListPods),
handler.OpenAPIDef{
ID: "ListPods",
Tags: []string{"inframonitoring"},
Summary: "List Pods for Infra Monitoring",
Description: "Returns a paginated list of Kubernetes pods with key metrics: CPU usage, CPU request/limit utilization, memory working set, memory request/limit utilization, current pod phase (pending/running/succeeded/failed/unknown), and pod age (ms since start time). Each pod includes metadata attributes (namespace, node, workload owner such as deployment/statefulset/daemonset/job/cronjob, cluster). Supports filtering via a filter expression, custom groupBy to aggregate pods by any attribute, ordering by any of the six metrics (cpu, cpu_request, cpu_limit, memory, memory_request, memory_limit), and pagination via offset/limit. The response type is 'list' for the default k8s.pod.uid grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (podCPU, podCPURequest, podCPULimit, podMemory, podMemoryRequest, podMemoryLimit, podAge) return -1 as a sentinel when no data is available for that field; frontends should render '—' rather than the literal value.",
Request: new(inframonitoringtypes.PostablePods),
RequestContentType: "application/json",
Response: new(inframonitoringtypes.Pods),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodPost).GetError(); err != nil {
return err
}
if err := router.Handle("/api/v2/infra_monitoring/onboarding", handler.New(
provider.authZ.ViewAccess(provider.infraMonitoringHandler.GetOnboarding),
handler.OpenAPIDef{
ID: "GetOnboarding",
Tags: []string{"inframonitoring"},
Summary: "Get Onboarding Status for Infra Monitoring",
Description: "Returns the per-tab readiness of the infra-monitoring section selected by the 'type' query parameter (hosts, processes, pods, nodes, deployments, daemonsets, statefulsets, jobs, namespaces, clusters, volumes). For each collector receiver or processor that contributes required metrics or attributes, lists what is present and what is missing, with a prebuilt user-facing message and a docs link per missing component. Default-enabled metrics are those expected as soon as the receiver is configured; optional metrics require 'enabled: true' in receiver config. 'ready' is true only when every missing list is empty.",
RequestQuery: new(inframonitoringtypes.PostableOnboarding),
Response: new(inframonitoringtypes.Onboarding),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodGet).GetError(); err != nil {
return err
}
return nil
}

View File

@@ -22,6 +22,30 @@ func NewHandler(m inframonitoring.Module) inframonitoring.Handler {
}
}
// GetOnboarding handles GET /api/v2/infra_monitoring/onboarding: it resolves
// the caller's org from auth claims, binds the 'type' query parameter, and
// delegates to the module for the readiness report.
func (h *handler) GetOnboarding(rw http.ResponseWriter, req *http.Request) {
	// Authentication first — no work is done for unauthenticated callers.
	claims, claimsErr := authtypes.ClaimsFromContext(req.Context())
	if claimsErr != nil {
		render.Error(rw, claimsErr)
		return
	}
	orgID := valuer.MustNewUUID(claims.OrgID)

	// Bind the query string (carries the onboarding 'type') into the payload.
	payload := inframonitoringtypes.PostableOnboarding{}
	if bindErr := binding.Query.BindQuery(req.URL.Query(), &payload); bindErr != nil {
		render.Error(rw, bindErr)
		return
	}

	onboarding, moduleErr := h.module.GetOnboarding(req.Context(), orgID, &payload)
	if moduleErr != nil {
		render.Error(rw, moduleErr)
		return
	}
	render.Success(rw, http.StatusOK, onboarding)
}
func (h *handler) ListHosts(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
@@ -45,3 +69,27 @@ func (h *handler) ListHosts(rw http.ResponseWriter, req *http.Request) {
render.Success(rw, http.StatusOK, result)
}
// ListPods handles POST /api/v2/infra_monitoring/pods: it resolves the
// caller's org from auth claims, decodes the JSON body into the pods-list
// request, and delegates to the module.
func (h *handler) ListPods(rw http.ResponseWriter, req *http.Request) {
	// Authentication first — no work is done for unauthenticated callers.
	claims, claimsErr := authtypes.ClaimsFromContext(req.Context())
	if claimsErr != nil {
		render.Error(rw, claimsErr)
		return
	}
	orgID := valuer.MustNewUUID(claims.OrgID)

	// Decode the JSON request body into the pods-list payload.
	payload := inframonitoringtypes.PostablePods{}
	if bindErr := binding.JSON.BindBody(req.Body, &payload); bindErr != nil {
		render.Error(rw, bindErr)
		return
	}

	pods, moduleErr := h.module.ListPods(req.Context(), orgID, &payload)
	if moduleErr != nil {
		render.Error(rw, moduleErr)
		return
	}
	render.Success(rw, http.StatusOK, pods)
}

View File

@@ -434,6 +434,56 @@ func (m *module) getMetricsExistenceAndEarliestTime(ctx context.Context, metricN
return missingMetrics, globalMinFirstReported, nil
}
// getAttributesExistence reports which of attrNames have ever been recorded
// as a label on any of the given metricNames. Present attrs map to true;
// anything absent from the map was never seen.
//
// Presence is checked against distributed_metadata with no time-range
// filter — the metadata table is append-only, so an attr that ever existed
// for a metric stays visible. Deciding "required but missing" is left to the
// caller via lookup against its required list.
//
// Counterpart of getMetricsExistenceAndEarliestTime — the onboarding API
// uses it to validate required resource attributes (e.g. host.name,
// k8s.pod.uid) for each infra-monitoring tab.
func (m *module) getAttributesExistence(ctx context.Context, metricNames, attrNames []string) (map[string]bool, error) {
	// Nothing to check — avoid issuing an empty IN() query.
	if len(metricNames) == 0 || len(attrNames) == 0 {
		return map[string]bool{}, nil
	}

	builder := sqlbuilder.NewSelectBuilder()
	builder.Select("attr_name", "count(*) AS cnt")
	builder.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, telemetrymetrics.AttributesMetadataTableName))
	builder.Where(
		builder.In("metric_name", sqlbuilder.List(metricNames)),
		builder.In("attr_name", sqlbuilder.List(attrNames)),
	)
	builder.GroupBy("attr_name")

	query, args := builder.BuildWithFlavor(sqlbuilder.ClickHouse)
	rows, err := m.telemetryStore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	found := make(map[string]bool, len(attrNames))
	for rows.Next() {
		var (
			attrName string
			count    uint64
		)
		if scanErr := rows.Scan(&attrName, &count); scanErr != nil {
			return nil, scanErr
		}
		// Defensive: skip empty names / zero counts, which shouldn't occur
		// after GROUP BY but cost nothing to guard against.
		if attrName != "" && count > 0 {
			found[attrName] = true
		}
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return found, nil
}
// getMetadata fetches the latest values of additionalCols for each unique combination of groupBy keys,
// within the given time range and metric names. It uses argMax(tuple(...), unix_milli) to ensure
// we always pick attribute values from the latest timestamp for each group.

View File

@@ -1,5 +1,7 @@
package implinframonitoring
import "github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
// The types in this file are only used within the implinframonitoring package, and are not exposed outside.
// They are primarily used for internal processing and structuring of data within the module's implementation.
@@ -14,3 +16,38 @@ type groupHostStatusCounts struct {
Active int
Inactive int
}
// onboardingComponentBucket is a single collector component's contribution
// toward a single infra-monitoring tab's readiness. Any of the three
// dimension slices (DefaultMetrics, OptionalMetrics, RequiredAttrs) may be
// empty — splitBucket skips empty dimensions when building response entries.
type onboardingComponentBucket struct {
	Component         inframonitoringtypes.AssociatedComponent // which receiver/processor produces these metrics/attrs
	DefaultMetrics    []string                                 // expected as soon as the component is configured
	OptionalMetrics   []string                                 // require explicit enablement in the component's config
	RequiredAttrs     []string                                 // resource attributes the component must attach
	DocumentationLink string                                   // setup docs URL, surfaced on missing entries
}
// onboardingSpec defines, for one OnboardingType, the full set of
// component-scoped buckets that must all be satisfied for the tab to be
// considered ready.
type onboardingSpec struct {
	Buckets []onboardingComponentBucket // one bucket per contributing collector component
}
// getAllMetrics flattens every bucket's default and optional metric names
// into one slice, preserving bucket order (defaults before optionals within
// each bucket).
func (s onboardingSpec) getAllMetrics() []string {
	var names []string
	for i := range s.Buckets {
		names = append(names, s.Buckets[i].DefaultMetrics...)
		names = append(names, s.Buckets[i].OptionalMetrics...)
	}
	return names
}
// getAllAttrs flattens every bucket's required attribute names into one
// slice, preserving bucket order.
func (s onboardingSpec) getAllAttrs() []string {
	var names []string
	for i := range s.Buckets {
		names = append(names, s.Buckets[i].RequiredAttrs...)
	}
	return names
}

View File

@@ -47,6 +47,76 @@ func NewModule(
}
}
// GetOnboarding runs a per-type readiness check: for the requested
// infra-monitoring tab it reports which required metrics and attributes are
// present vs missing, grouped by the collector component that produces them.
// Ready is true iff every missing list is empty.
func (m *module) GetOnboarding(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableOnboarding) (*inframonitoringtypes.Onboarding, error) {
	if err := req.Validate(); err != nil {
		return nil, err
	}
	spec, err := getSpecForType(req.Type)
	if err != nil {
		return nil, err
	}

	metricNames := spec.getAllMetrics()
	attrNames := spec.getAllAttrs()

	// Which of the spec's metrics have never been reported at all.
	missingList, _, err := m.getMetricsExistenceAndEarliestTime(ctx, metricNames)
	if err != nil {
		return nil, err
	}
	missingSet := make(map[string]bool, len(missingList))
	for _, metricName := range missingList {
		missingSet[metricName] = true
	}

	// Which of the spec's required attrs have ever appeared on those metrics.
	attrsPresent, err := m.getAttributesExistence(ctx, metricNames, attrNames)
	if err != nil {
		return nil, err
	}

	// All slices start non-nil so they serialize as [] rather than null.
	out := &inframonitoringtypes.Onboarding{
		Type:                         req.Type,
		PresentDefaultEnabledMetrics: []inframonitoringtypes.MetricsComponentEntry{},
		PresentOptionalMetrics:       []inframonitoringtypes.MetricsComponentEntry{},
		PresentRequiredAttributes:    []inframonitoringtypes.AttributesComponentEntry{},
		MissingDefaultEnabledMetrics: []inframonitoringtypes.MissingMetricsComponentEntry{},
		MissingOptionalMetrics:       []inframonitoringtypes.MissingMetricsComponentEntry{},
		MissingRequiredAttributes:    []inframonitoringtypes.MissingAttributesComponentEntry{},
	}

	// Split each bucket against the missing/present sets and collect
	// whichever of its six possible entries are non-nil.
	for _, bucket := range spec.Buckets {
		split := splitBucket(bucket, missingSet, attrsPresent)
		if split.PresentDefault != nil {
			out.PresentDefaultEnabledMetrics = append(out.PresentDefaultEnabledMetrics, *split.PresentDefault)
		}
		if split.PresentOptional != nil {
			out.PresentOptionalMetrics = append(out.PresentOptionalMetrics, *split.PresentOptional)
		}
		if split.PresentAttrs != nil {
			out.PresentRequiredAttributes = append(out.PresentRequiredAttributes, *split.PresentAttrs)
		}
		if split.MissingDefault != nil {
			out.MissingDefaultEnabledMetrics = append(out.MissingDefaultEnabledMetrics, *split.MissingDefault)
		}
		if split.MissingOptional != nil {
			out.MissingOptionalMetrics = append(out.MissingOptionalMetrics, *split.MissingOptional)
		}
		if split.MissingAttrs != nil {
			out.MissingRequiredAttributes = append(out.MissingRequiredAttributes, *split.MissingAttrs)
		}
	}

	out.Ready = len(out.MissingDefaultEnabledMetrics) == 0 &&
		len(out.MissingOptionalMetrics) == 0 &&
		len(out.MissingRequiredAttributes) == 0
	return out, nil
}
func (m *module) ListHosts(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableHosts) (*inframonitoringtypes.Hosts, error) {
if err := req.Validate(); err != nil {
return nil, err
@@ -159,3 +229,79 @@ func (m *module) ListHosts(ctx context.Context, orgID valuer.UUID, req *inframon
return resp, nil
}
// ListPods returns the paginated pods table for one org: it resolves request
// defaults, verifies required pod metrics exist and the requested window is
// not entirely before the earliest data, then resolves the current page of
// groups and hydrates each with metrics and metadata.
func (m *module) ListPods(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostablePods) (*inframonitoringtypes.Pods, error) {
	if err := req.Validate(); err != nil {
		return nil, err
	}
	resp := &inframonitoringtypes.Pods{}
	// Default ordering: CPU, descending.
	if req.OrderBy == nil {
		req.OrderBy = &qbtypes.OrderBy{
			Key: qbtypes.OrderByKey{
				TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
					Name: inframonitoringtypes.PodsOrderByCPU,
				},
			},
			Direction: qbtypes.OrderDirectionDesc,
		}
	}
	// No explicit groupBy means one row per pod (grouped by pod UID) and a
	// plain list response; any custom groupBy switches to a grouped list.
	if len(req.GroupBy) == 0 {
		req.GroupBy = []qbtypes.GroupByKey{podUIDGroupByKey}
		resp.Type = inframonitoringtypes.ResponseTypeList
	} else {
		resp.Type = inframonitoringtypes.ResponseTypeGroupedList
	}
	// Check which required pod metrics have ever been reported, and the
	// earliest time any of them first appeared.
	missingMetrics, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, podsTableMetricNamesList)
	if err != nil {
		return nil, err
	}
	// Short-circuit with an empty result when required metrics are absent —
	// the response carries the missing names so the UI can guide setup.
	if len(missingMetrics) > 0 {
		resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: missingMetrics}
		resp.Records = []inframonitoringtypes.PodRecord{}
		resp.Total = 0
		return resp, nil
	}
	// Short-circuit when the requested window ends before any data exists.
	if req.End < int64(minFirstReportedUnixMilli) {
		resp.EndTimeBeforeRetention = true
		resp.Records = []inframonitoringtypes.PodRecord{}
		resp.Total = 0
		return resp, nil
	}
	resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: []string{}}
	// Metadata for every matching group; its size is the unpaginated total.
	metadataMap, err := m.getPodsTableMetadata(ctx, req)
	if err != nil {
		return nil, err
	}
	resp.Total = len(metadataMap)
	// Resolve the groups belonging to the requested page.
	pageGroups, err := m.getTopPodGroups(ctx, orgID, req, metadataMap)
	if err != nil {
		return nil, err
	}
	if len(pageGroups) == 0 {
		resp.Records = []inframonitoringtypes.PodRecord{}
		return resp, nil
	}
	filterExpr := ""
	if req.Filter != nil {
		filterExpr = req.Filter.Expression
	}
	// Fetch the full metric set for just the page's groups, then assemble
	// the final records from query results plus metadata.
	fullQueryReq := buildFullQueryRequest(req.Start, req.End, filterExpr, req.GroupBy, pageGroups, m.newPodsTableListQuery())
	queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
	if err != nil {
		return nil, err
	}
	resp.Records = buildPodRecords(queryResp, pageGroups, req.GroupBy, metadataMap, req.End)
	resp.Warning = queryResp.Warning
	return resp, nil
}

View File

@@ -0,0 +1,144 @@
package implinframonitoring
import (
"fmt"
"strings"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
)
// bucketSplit carries the up-to-six entries a single spec bucket contributes
// to an onboarding response: one present and one missing entry for each of
// the three dimensions (default metrics, optional metrics, required attrs).
// Any field may be nil if the bucket doesn't populate that dimension.
type bucketSplit struct {
	PresentDefault  *inframonitoringtypes.MetricsComponentEntry
	PresentOptional *inframonitoringtypes.MetricsComponentEntry
	PresentAttrs    *inframonitoringtypes.AttributesComponentEntry
	MissingDefault  *inframonitoringtypes.MissingMetricsComponentEntry
	MissingOptional *inframonitoringtypes.MissingMetricsComponentEntry
	MissingAttrs    *inframonitoringtypes.MissingAttributesComponentEntry
}
// splitBucket partitions one component bucket's metric and attribute lists
// against the module-wide missing/present sets into up to six response
// entries. Empty partitions are left nil so callers can skip them.
//
// The default and optional metric dimensions share their splitting logic via
// splitMetricDimension; only the user-facing message builder differs.
func splitBucket(b onboardingComponentBucket, missingMetrics, presentAttrs map[string]bool) bucketSplit {
	var s bucketSplit

	// Default-enabled metrics.
	s.PresentDefault, s.MissingDefault = splitMetricDimension(
		b.DefaultMetrics, missingMetrics, b.Component, b.DocumentationLink,
		buildMissingDefaultMetricsMessage,
	)

	// Optional metrics.
	s.PresentOptional, s.MissingOptional = splitMetricDimension(
		b.OptionalMetrics, missingMetrics, b.Component, b.DocumentationLink,
		buildMissingOptionalMetricsMessage,
	)

	// Required resource attributes.
	presentA, missA := partitionAttrs(b.RequiredAttrs, presentAttrs)
	if len(presentA) > 0 {
		s.PresentAttrs = &inframonitoringtypes.AttributesComponentEntry{
			Attributes:          presentA,
			AssociatedComponent: b.Component,
		}
	}
	if len(missA) > 0 {
		s.MissingAttrs = &inframonitoringtypes.MissingAttributesComponentEntry{
			AttributesComponentEntry: inframonitoringtypes.AttributesComponentEntry{
				Attributes:          missA,
				AssociatedComponent: b.Component,
			},
			Message:           buildMissingRequiredAttrsMessage(missA, b.Component.Name),
			DocumentationLink: b.DocumentationLink,
		}
	}
	return s
}

// splitMetricDimension partitions one metric dimension (default or optional)
// against the missing set and wraps each non-empty half into its response
// entry. msgFn builds the user-facing message for the missing half; docLink
// is attached to it. Either return value is nil when its half is empty.
func splitMetricDimension(
	metrics []string,
	missingMetrics map[string]bool,
	component inframonitoringtypes.AssociatedComponent,
	docLink string,
	msgFn func(metrics []string, componentName string) string,
) (*inframonitoringtypes.MetricsComponentEntry, *inframonitoringtypes.MissingMetricsComponentEntry) {
	present, missing := partitionMetrics(metrics, missingMetrics)

	var presentEntry *inframonitoringtypes.MetricsComponentEntry
	if len(present) > 0 {
		presentEntry = &inframonitoringtypes.MetricsComponentEntry{
			Metrics:             present,
			AssociatedComponent: component,
		}
	}

	var missingEntry *inframonitoringtypes.MissingMetricsComponentEntry
	if len(missing) > 0 {
		missingEntry = &inframonitoringtypes.MissingMetricsComponentEntry{
			MetricsComponentEntry: inframonitoringtypes.MetricsComponentEntry{
				Metrics:             missing,
				AssociatedComponent: component,
			},
			Message:           msgFn(missing, component.Name),
			DocumentationLink: docLink,
		}
	}
	return presentEntry, missingEntry
}
// getSpecForType looks up the onboardingSpec registered for t in
// onboardingSpecs, failing with an invalid-input error when the type has no
// spec. The returned pointer addresses a copy of the map entry.
func getSpecForType(t inframonitoringtypes.OnboardingType) (*onboardingSpec, error) {
	if spec, ok := onboardingSpecs[t]; ok {
		return &spec, nil
	}
	return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "no onboarding spec for type: %s", t)
}
// partitionMetrics splits a metric-name list into those present (absent from
// missingMetrics) and those missing (flagged true in missingMetrics). Input
// order is preserved within each half.
func partitionMetrics(metrics []string, missingMetrics map[string]bool) (present, missing []string) {
	for _, name := range metrics {
		if !missingMetrics[name] {
			present = append(present, name)
			continue
		}
		missing = append(missing, name)
	}
	return present, missing
}
// partitionAttrs splits a required-attrs list into present and missing based
// on the presence set returned by getAttributesExistence. Input order is
// preserved within each half.
//
// (The previous comment referenced getAttributesPresence, a name that no
// longer exists after the method was renamed.)
func partitionAttrs(attrs []string, presentAttrs map[string]bool) (present, missing []string) {
	for _, a := range attrs {
		if presentAttrs[a] {
			present = append(present, a)
		} else {
			missing = append(missing, a)
		}
	}
	return present, missing
}
// buildMissingDefaultMetricsMessage renders the user-facing message for
// default-enabled metrics that were never reported by componentName.
// Pluralizes "metric" based on count, consistent with
// buildMissingRequiredAttrsMessage.
func buildMissingDefaultMetricsMessage(metrics []string, componentName string) string {
	noun := "metric"
	if len(metrics) > 1 {
		noun = "metrics"
	}
	return fmt.Sprintf(
		"Missing default %s %s from %s. Learn how to configure here.",
		noun, strings.Join(metrics, ", "), componentName,
	)
}
// buildMissingOptionalMetricsMessage renders the user-facing message for
// optional metrics that were never reported by componentName. Pluralizes
// "metric" based on count, consistent with buildMissingRequiredAttrsMessage.
func buildMissingOptionalMetricsMessage(metrics []string, componentName string) string {
	noun := "metric"
	if len(metrics) > 1 {
		noun = "metrics"
	}
	return fmt.Sprintf(
		"Missing optional %s %s from %s. Learn how to enable here.",
		noun, strings.Join(metrics, ", "), componentName,
	)
}
// buildMissingRequiredAttrsMessage renders the user-facing message for
// required resource attributes that were never seen from componentName,
// choosing singular or plural "attribute" based on the count.
func buildMissingRequiredAttrsMessage(attrs []string, componentName string) string {
	noun := "attribute"
	if len(attrs) > 1 {
		noun = "attributes"
	}
	joined := strings.Join(attrs, ", ")
	return fmt.Sprintf("Missing required %s %s from %s. Learn how to configure here.", noun, joined, componentName)
}

View File

@@ -0,0 +1,361 @@
package implinframonitoring
import "github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
// Component names — the 5 OTel collector receivers/processors that produce
// metrics and resource attributes consumed by infra-monitoring tabs. Bare
// strings on purpose (not wrapped enums) — the list is open-ended enough that
// an enum adds more friction than value.
const (
	componentNameHostMetricsReceiver        = "hostmetricsreceiver"
	componentNameKubeletStatsReceiver       = "kubeletstatsreceiver"
	componentNameK8sClusterReceiver         = "k8sclusterreceiver"
	componentNameResourceDetectionProcessor = "resourcedetectionprocessor"
	componentNameK8sAttributesProcessor     = "k8sattributesprocessor"
)

// Documentation links — one per component. User-facing; emitted on missing
// entries so users can jump straight to the relevant setup instructions.
const (
	docLinkHostMetricsReceiver        = "https://signoz.io/docs/infrastructure-monitoring/hostmetrics/#step-2-configure-the-collector"
	docLinkKubeletStatsReceiver       = "https://signoz.io/docs/infrastructure-monitoring/k8s-metrics/#setting-up-kubelet-stats-monitoring"
	docLinkK8sClusterReceiver         = "https://signoz.io/docs/infrastructure-monitoring/k8s-metrics/#setting-up-k8s-cluster-monitoring"
	docLinkResourceDetectionProcessor = "https://signoz.io/docs/infrastructure-monitoring/hostmetrics/#host-name-is-blankempty"
	docLinkK8sAttributesProcessor     = "https://signoz.io/docs/infrastructure-monitoring/k8s-metrics/#2-enable-kubernetes-metadata"
)
// One AssociatedComponent value per collector component, reused by every
// per-type spec below. Each pairs the component's kind (receiver/processor)
// with its canonical name.
var (
	componentHostMetricsReceiver = inframonitoringtypes.AssociatedComponent{
		Type: inframonitoringtypes.OnboardingComponentTypeReceiver,
		Name: componentNameHostMetricsReceiver,
	}
	componentKubeletStatsReceiver = inframonitoringtypes.AssociatedComponent{
		Type: inframonitoringtypes.OnboardingComponentTypeReceiver,
		Name: componentNameKubeletStatsReceiver,
	}
	componentK8sClusterReceiver = inframonitoringtypes.AssociatedComponent{
		Type: inframonitoringtypes.OnboardingComponentTypeReceiver,
		Name: componentNameK8sClusterReceiver,
	}
	componentResourceDetectionProcessor = inframonitoringtypes.AssociatedComponent{
		Type: inframonitoringtypes.OnboardingComponentTypeProcessor,
		Name: componentNameResourceDetectionProcessor,
	}
	componentK8sAttributesProcessor = inframonitoringtypes.AssociatedComponent{
		Type: inframonitoringtypes.OnboardingComponentTypeProcessor,
		Name: componentNameK8sAttributesProcessor,
	}
)
// onboardingSpecs is the single lookup table the module consults for a type's
// readiness contract. Every OnboardingType value must have an entry here —
// getSpecForType returns an invalid-input error for types that are absent.
var onboardingSpecs = map[inframonitoringtypes.OnboardingType]onboardingSpec{
	inframonitoringtypes.OnboardingTypeHosts:        hostsSpec,
	inframonitoringtypes.OnboardingTypeProcesses:    processesSpec,
	inframonitoringtypes.OnboardingTypePods:         podsSpec,
	inframonitoringtypes.OnboardingTypeNodes:        nodesSpec,
	inframonitoringtypes.OnboardingTypeDeployments:  deploymentsSpec,
	inframonitoringtypes.OnboardingTypeDaemonsets:   daemonsetsSpec,
	inframonitoringtypes.OnboardingTypeStatefulsets: statefulsetsSpec,
	inframonitoringtypes.OnboardingTypeJobs:         jobsSpec,
	inframonitoringtypes.OnboardingTypeNamespaces:   namespacesSpec,
	inframonitoringtypes.OnboardingTypeClusters:     clustersSpec,
	inframonitoringtypes.OnboardingTypeVolumes:      volumesSpec,
}
// Per-type specs. Every metric and attribute is spelled out in its own spec
// on purpose — no shared slices, no concatenation helpers. Repetition is
// cheaper than indirection when auditing what each tab actually requires.

// hostsSpec: host CPU/memory/load/filesystem metrics from the hostmetrics
// receiver, plus the host.name attribute from the resourcedetection processor.
var hostsSpec = onboardingSpec{
	Buckets: []onboardingComponentBucket{
		{
			Component: componentHostMetricsReceiver,
			DefaultMetrics: []string{
				"system.cpu.time",
				"system.memory.usage",
				"system.cpu.load_average.15m",
				"system.filesystem.usage",
			},
			DocumentationLink: docLinkHostMetricsReceiver,
		},
		{
			Component:         componentResourceDetectionProcessor,
			RequiredAttrs:     []string{"host.name"},
			DocumentationLink: docLinkResourceDetectionProcessor,
		},
	},
}

// processesSpec: per-process CPU/memory metrics and the process.pid attribute,
// all from the hostmetrics receiver.
var processesSpec = onboardingSpec{
	Buckets: []onboardingComponentBucket{
		{
			Component: componentHostMetricsReceiver,
			DefaultMetrics: []string{
				"process.cpu.time",
				"process.memory.usage",
			},
			RequiredAttrs:     []string{"process.pid"},
			DocumentationLink: docLinkHostMetricsReceiver,
		},
	},
}

// podsSpec: pod usage from kubeletstats (request/limit utilization metrics are
// optional), pod phase from k8scluster, and k8s.pod.uid from k8sattributes.
var podsSpec = onboardingSpec{
	Buckets: []onboardingComponentBucket{
		{
			Component: componentKubeletStatsReceiver,
			DefaultMetrics: []string{
				"k8s.pod.cpu.usage",
				"k8s.pod.memory.working_set",
			},
			OptionalMetrics: []string{
				"k8s.pod.cpu_request_utilization",
				"k8s.pod.cpu_limit_utilization",
				"k8s.pod.memory_request_utilization",
				"k8s.pod.memory_limit_utilization",
			},
			DocumentationLink: docLinkKubeletStatsReceiver,
		},
		{
			Component:         componentK8sClusterReceiver,
			DefaultMetrics:    []string{"k8s.pod.phase"},
			DocumentationLink: docLinkK8sClusterReceiver,
		},
		{
			Component:         componentK8sAttributesProcessor,
			RequiredAttrs:     []string{"k8s.pod.uid"},
			DocumentationLink: docLinkK8sAttributesProcessor,
		},
	},
}

// nodesSpec: node usage from kubeletstats, allocatable capacity and readiness
// from k8scluster, and k8s.node.name from k8sattributes.
var nodesSpec = onboardingSpec{
	Buckets: []onboardingComponentBucket{
		{
			Component: componentKubeletStatsReceiver,
			DefaultMetrics: []string{
				"k8s.node.cpu.usage",
				"k8s.node.memory.working_set",
			},
			DocumentationLink: docLinkKubeletStatsReceiver,
		},
		{
			Component: componentK8sClusterReceiver,
			DefaultMetrics: []string{
				"k8s.node.allocatable_cpu",
				"k8s.node.allocatable_memory",
				"k8s.node.condition_ready",
			},
			DocumentationLink: docLinkK8sClusterReceiver,
		},
		{
			Component:         componentK8sAttributesProcessor,
			RequiredAttrs:     []string{"k8s.node.name"},
			DocumentationLink: docLinkK8sAttributesProcessor,
		},
	},
}
// deploymentsSpec: pod usage from kubeletstats plus deployment desired/available
// counts and container restarts from k8scluster; keyed by k8s.deployment.name.
var deploymentsSpec = onboardingSpec{
	Buckets: []onboardingComponentBucket{
		{
			Component: componentKubeletStatsReceiver,
			DefaultMetrics: []string{
				"k8s.pod.cpu.usage",
				"k8s.pod.memory.working_set",
			},
			OptionalMetrics: []string{
				"k8s.pod.cpu_request_utilization",
				"k8s.pod.cpu_limit_utilization",
				"k8s.pod.memory_request_utilization",
				"k8s.pod.memory_limit_utilization",
			},
			DocumentationLink: docLinkKubeletStatsReceiver,
		},
		{
			Component: componentK8sClusterReceiver,
			DefaultMetrics: []string{
				"k8s.container.restarts",
				"k8s.deployment.desired",
				"k8s.deployment.available",
			},
			DocumentationLink: docLinkK8sClusterReceiver,
		},
		{
			Component:         componentK8sAttributesProcessor,
			RequiredAttrs:     []string{"k8s.deployment.name"},
			DocumentationLink: docLinkK8sAttributesProcessor,
		},
	},
}

// daemonsetsSpec: pod usage from kubeletstats plus daemonset scheduling counts
// and container restarts from k8scluster; keyed by k8s.daemonset.name.
var daemonsetsSpec = onboardingSpec{
	Buckets: []onboardingComponentBucket{
		{
			Component: componentKubeletStatsReceiver,
			DefaultMetrics: []string{
				"k8s.pod.cpu.usage",
				"k8s.pod.memory.working_set",
			},
			OptionalMetrics: []string{
				"k8s.pod.cpu_request_utilization",
				"k8s.pod.cpu_limit_utilization",
				"k8s.pod.memory_request_utilization",
				"k8s.pod.memory_limit_utilization",
			},
			DocumentationLink: docLinkKubeletStatsReceiver,
		},
		{
			Component: componentK8sClusterReceiver,
			DefaultMetrics: []string{
				"k8s.container.restarts",
				"k8s.daemonset.desired_scheduled_nodes",
				"k8s.daemonset.current_scheduled_nodes",
			},
			DocumentationLink: docLinkK8sClusterReceiver,
		},
		{
			Component:         componentK8sAttributesProcessor,
			RequiredAttrs:     []string{"k8s.daemonset.name"},
			DocumentationLink: docLinkK8sAttributesProcessor,
		},
	},
}

// statefulsetsSpec: pod usage from kubeletstats plus statefulset pod counts
// and container restarts from k8scluster; keyed by k8s.statefulset.name.
var statefulsetsSpec = onboardingSpec{
	Buckets: []onboardingComponentBucket{
		{
			Component: componentKubeletStatsReceiver,
			DefaultMetrics: []string{
				"k8s.pod.cpu.usage",
				"k8s.pod.memory.working_set",
			},
			OptionalMetrics: []string{
				"k8s.pod.cpu_request_utilization",
				"k8s.pod.cpu_limit_utilization",
				"k8s.pod.memory_request_utilization",
				"k8s.pod.memory_limit_utilization",
			},
			DocumentationLink: docLinkKubeletStatsReceiver,
		},
		{
			Component: componentK8sClusterReceiver,
			DefaultMetrics: []string{
				"k8s.container.restarts",
				"k8s.statefulset.desired_pods",
				"k8s.statefulset.current_pods",
			},
			DocumentationLink: docLinkK8sClusterReceiver,
		},
		{
			Component:         componentK8sAttributesProcessor,
			RequiredAttrs:     []string{"k8s.statefulset.name"},
			DocumentationLink: docLinkK8sAttributesProcessor,
		},
	},
}

// jobsSpec: pod usage from kubeletstats plus job pod-outcome counts and
// container restarts from k8scluster; keyed by k8s.job.name.
var jobsSpec = onboardingSpec{
	Buckets: []onboardingComponentBucket{
		{
			Component: componentKubeletStatsReceiver,
			DefaultMetrics: []string{
				"k8s.pod.cpu.usage",
				"k8s.pod.memory.working_set",
			},
			OptionalMetrics: []string{
				"k8s.pod.cpu_request_utilization",
				"k8s.pod.cpu_limit_utilization",
				"k8s.pod.memory_request_utilization",
				"k8s.pod.memory_limit_utilization",
			},
			DocumentationLink: docLinkKubeletStatsReceiver,
		},
		{
			Component: componentK8sClusterReceiver,
			DefaultMetrics: []string{
				"k8s.container.restarts",
				"k8s.job.desired_successful_pods",
				"k8s.job.active_pods",
				"k8s.job.failed_pods",
				"k8s.job.successful_pods",
			},
			DocumentationLink: docLinkK8sClusterReceiver,
		},
		{
			Component:         componentK8sAttributesProcessor,
			RequiredAttrs:     []string{"k8s.job.name"},
			DocumentationLink: docLinkK8sAttributesProcessor,
		},
	},
}

// namespacesSpec: pod usage from kubeletstats and pod phase from k8scluster,
// aggregated by k8s.namespace.name. Request/limit utilization is not needed.
var namespacesSpec = onboardingSpec{
	Buckets: []onboardingComponentBucket{
		{
			Component: componentKubeletStatsReceiver,
			DefaultMetrics: []string{
				"k8s.pod.cpu.usage",
				"k8s.pod.memory.working_set",
			},
			DocumentationLink: docLinkKubeletStatsReceiver,
		},
		{
			Component:         componentK8sClusterReceiver,
			DefaultMetrics:    []string{"k8s.pod.phase"},
			DocumentationLink: docLinkK8sClusterReceiver,
		},
		{
			Component:         componentK8sAttributesProcessor,
			RequiredAttrs:     []string{"k8s.namespace.name"},
			DocumentationLink: docLinkK8sAttributesProcessor,
		},
	},
}

// clustersSpec: node usage from kubeletstats and allocatable capacity from
// k8scluster, aggregated by k8s.cluster.name (set by resourcedetection).
var clustersSpec = onboardingSpec{
	Buckets: []onboardingComponentBucket{
		{
			Component: componentKubeletStatsReceiver,
			DefaultMetrics: []string{
				"k8s.node.cpu.usage",
				"k8s.node.memory.working_set",
			},
			DocumentationLink: docLinkKubeletStatsReceiver,
		},
		{
			Component: componentK8sClusterReceiver,
			DefaultMetrics: []string{
				"k8s.node.allocatable_cpu",
				"k8s.node.allocatable_memory",
			},
			DocumentationLink: docLinkK8sClusterReceiver,
		},
		{
			Component:         componentResourceDetectionProcessor,
			RequiredAttrs:     []string{"k8s.cluster.name"},
			DocumentationLink: docLinkResourceDetectionProcessor,
		},
	},
}

// volumesSpec: persistent-volume capacity and inode metrics from kubeletstats,
// keyed by k8s.persistentvolumeclaim.name from k8sattributes.
var volumesSpec = onboardingSpec{
	Buckets: []onboardingComponentBucket{
		{
			Component: componentKubeletStatsReceiver,
			DefaultMetrics: []string{
				"k8s.volume.available",
				"k8s.volume.capacity",
				"k8s.volume.inodes",
				"k8s.volume.inodes.free",
				"k8s.volume.inodes.used",
			},
			DocumentationLink: docLinkKubeletStatsReceiver,
		},
		{
			Component:         componentK8sAttributesProcessor,
			RequiredAttrs:     []string{"k8s.persistentvolumeclaim.name"},
			DocumentationLink: docLinkK8sAttributesProcessor,
		},
	},
}

View File

@@ -0,0 +1,255 @@
package implinframonitoring
import (
"testing"
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/stretchr/testify/require"
)
// testComponent is the component shared by every splitBucket case. It is
// declared as a receiver ("testreceiver"); splitBucket only copies the
// component through to its output entries, so the concrete type does not
// influence any assertion below.
var testComponent = inframonitoringtypes.AssociatedComponent{
	Type: inframonitoringtypes.OnboardingComponentTypeReceiver,
	Name: "testreceiver",
}

// testDocLink is the documentation link attached to every test bucket.
const testDocLink = "https://example.com/docs"
// TestSplitBucket exercises splitBucket across every combination of its three
// requirement dimensions (default metrics, optional metrics, required
// attributes), each split into a present half and a missing half.
func TestSplitBucket(t *testing.T) {
	// want lists the expected names per output entry; an empty slice means the
	// corresponding entry must be nil (enforced by the require* helpers).
	type want struct {
		presentDefault  []string
		presentOptional []string
		presentAttrs    []string
		missingDefault  []string
		missingOptional []string
		missingAttrs    []string
	}
	tests := []struct {
		name           string
		bucket         onboardingComponentBucket
		missingMetrics map[string]bool // metric name -> metric is missing
		presentAttrs   map[string]bool // attribute name -> attribute is present
		want           want
	}{
		{
			name:           "empty bucket — nothing to emit",
			bucket:         onboardingComponentBucket{Component: testComponent, DocumentationLink: testDocLink},
			missingMetrics: map[string]bool{},
			presentAttrs:   map[string]bool{},
			want:           want{},
		},
		{
			name: "all default metrics present",
			bucket: onboardingComponentBucket{
				Component:         testComponent,
				DefaultMetrics:    []string{"m1", "m2"},
				DocumentationLink: testDocLink,
			},
			missingMetrics: map[string]bool{},
			presentAttrs:   map[string]bool{},
			want: want{
				presentDefault: []string{"m1", "m2"},
			},
		},
		{
			name: "all default metrics missing",
			bucket: onboardingComponentBucket{
				Component:         testComponent,
				DefaultMetrics:    []string{"m1", "m2"},
				DocumentationLink: testDocLink,
			},
			missingMetrics: map[string]bool{"m1": true, "m2": true},
			presentAttrs:   map[string]bool{},
			want: want{
				missingDefault: []string{"m1", "m2"},
			},
		},
		{
			name: "mixed default metrics",
			bucket: onboardingComponentBucket{
				Component:         testComponent,
				DefaultMetrics:    []string{"m1", "m2", "m3"},
				DocumentationLink: testDocLink,
			},
			missingMetrics: map[string]bool{"m2": true},
			presentAttrs:   map[string]bool{},
			want: want{
				presentDefault: []string{"m1", "m3"},
				missingDefault: []string{"m2"},
			},
		},
		{
			name: "only optional metrics — all missing",
			bucket: onboardingComponentBucket{
				Component:         testComponent,
				OptionalMetrics:   []string{"opt1", "opt2"},
				DocumentationLink: testDocLink,
			},
			missingMetrics: map[string]bool{"opt1": true, "opt2": true},
			presentAttrs:   map[string]bool{},
			want: want{
				missingOptional: []string{"opt1", "opt2"},
			},
		},
		{
			name: "only required attrs — all present",
			bucket: onboardingComponentBucket{
				Component:         testComponent,
				RequiredAttrs:     []string{"a1", "a2"},
				DocumentationLink: testDocLink,
			},
			missingMetrics: map[string]bool{},
			presentAttrs:   map[string]bool{"a1": true, "a2": true},
			want: want{
				presentAttrs: []string{"a1", "a2"},
			},
		},
		{
			name: "only required attrs — all missing",
			bucket: onboardingComponentBucket{
				Component:         testComponent,
				RequiredAttrs:     []string{"a1"},
				DocumentationLink: testDocLink,
			},
			missingMetrics: map[string]bool{},
			presentAttrs:   map[string]bool{},
			want: want{
				missingAttrs: []string{"a1"},
			},
		},
		{
			// Cross-dimension case: every bucket field has both a present and
			// a missing member, so all six outputs are populated at once.
			name: "every dimension populated on both sides",
			bucket: onboardingComponentBucket{
				Component:         testComponent,
				DefaultMetrics:    []string{"d1", "d2"},
				OptionalMetrics:   []string{"o1", "o2"},
				RequiredAttrs:     []string{"a1", "a2"},
				DocumentationLink: testDocLink,
			},
			missingMetrics: map[string]bool{"d2": true, "o1": true},
			presentAttrs:   map[string]bool{"a1": true},
			want: want{
				presentDefault:  []string{"d1"},
				missingDefault:  []string{"d2"},
				presentOptional: []string{"o2"},
				missingOptional: []string{"o1"},
				presentAttrs:    []string{"a1"},
				missingAttrs:    []string{"a2"},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := splitBucket(tt.bucket, tt.missingMetrics, tt.presentAttrs)
			requireMetricsEntry(t, "presentDefault", got.PresentDefault, tt.want.presentDefault, false)
			requireMetricsEntry(t, "presentOptional", got.PresentOptional, tt.want.presentOptional, false)
			requireAttrsEntry(t, "presentAttrs", got.PresentAttrs, tt.want.presentAttrs, false)
			requireMissingMetrics(t, "missingDefault", got.MissingDefault, tt.want.missingDefault)
			requireMissingMetrics(t, "missingOptional", got.MissingOptional, tt.want.missingOptional)
			requireMissingAttrs(t, "missingAttrs", got.MissingAttrs, tt.want.missingAttrs)
		})
	}
}
// TestPartitionMetrics checks that partitionMetrics splits its input, order
// preserved, into metrics absent from the missing set and metrics in it.
func TestPartitionMetrics(t *testing.T) {
	metrics := []string{"a", "b", "c", "d"}
	missingSet := map[string]bool{"b": true, "d": true}

	present, missing := partitionMetrics(metrics, missingSet)

	require.Equal(t, []string{"a", "c"}, present)
	require.Equal(t, []string{"b", "d"}, missing)
}
// TestPartitionAttrs checks that partitionAttrs splits its input, order
// preserved, into attributes in the present set and attributes absent from it.
func TestPartitionAttrs(t *testing.T) {
	attrs := []string{"a", "b", "c"}
	presentSet := map[string]bool{"a": true, "c": true}

	present, missing := partitionAttrs(attrs, presentSet)

	require.Equal(t, []string{"a", "c"}, present)
	require.Equal(t, []string{"b"}, missing)
}
func TestMissingMessageTemplates(t *testing.T) {
require.Equal(t,
"Missing default metrics m1, m2 from comp. Learn how to configure here.",
buildMissingDefaultMetricsMessage([]string{"m1", "m2"}, "comp"),
)
require.Equal(t,
"Missing optional metrics m1 from comp. Learn how to enable here.",
buildMissingOptionalMetricsMessage([]string{"m1"}, "comp"),
)
require.Equal(t,
"Missing required attribute a1 from comp. Learn how to configure here.",
buildMissingRequiredAttrsMessage([]string{"a1"}, "comp"),
)
require.Equal(t,
"Missing required attributes a1, a2 from comp. Learn how to configure here.",
buildMissingRequiredAttrsMessage([]string{"a1", "a2"}, "comp"),
)
}
// TestOnboardingSpecs_CoverAllTypes ensures the spec map has exactly one entry
// per OnboardingType — prevents silently shipping an onboarding type that has
// no spec and would 500 at runtime.
func TestOnboardingSpecs_CoverAllTypes(t *testing.T) {
	for _, onboardingType := range inframonitoringtypes.ValidOnboardingTypes {
		_, found := onboardingSpecs[onboardingType]
		require.True(t, found, "missing onboarding spec for type %s", onboardingType)
	}
	// Equal lengths + full coverage above ⇒ the map holds no stray entries.
	require.Len(t, onboardingSpecs, len(inframonitoringtypes.ValidOnboardingTypes))
}
// --- helpers ---
// requireMetricsEntry asserts a present-metrics entry: nil when wantMetrics is
// empty, otherwise exactly wantMetrics plus the shared test component.
// NOTE(review): the trailing bool parameter is never read — candidate for
// removal together with its call sites in TestSplitBucket.
func requireMetricsEntry(t *testing.T, name string, got *inframonitoringtypes.MetricsComponentEntry, wantMetrics []string, _ bool) {
	t.Helper()
	if len(wantMetrics) == 0 {
		require.Nil(t, got, name)
		return
	}
	require.NotNil(t, got, name)
	require.Equal(t, wantMetrics, got.Metrics, name)
	require.Equal(t, testComponent, got.AssociatedComponent, name)
}
// requireAttrsEntry asserts a present-attributes entry: nil when wantAttrs is
// empty, otherwise exactly wantAttrs plus the shared test component.
// NOTE(review): the trailing bool parameter is never read — candidate for
// removal together with its call sites in TestSplitBucket.
func requireAttrsEntry(t *testing.T, name string, got *inframonitoringtypes.AttributesComponentEntry, wantAttrs []string, _ bool) {
	t.Helper()
	if len(wantAttrs) == 0 {
		require.Nil(t, got, name)
		return
	}
	require.NotNil(t, got, name)
	require.Equal(t, wantAttrs, got.Attributes, name)
	require.Equal(t, testComponent, got.AssociatedComponent, name)
}
// requireMissingMetrics asserts a missing-metrics entry: nil when no metrics
// are expected, otherwise the expected metric names, the shared component, a
// non-empty user-facing message, and the test documentation link.
func requireMissingMetrics(t *testing.T, name string, got *inframonitoringtypes.MissingMetricsComponentEntry, wantMetrics []string) {
	t.Helper()
	if len(wantMetrics) > 0 {
		require.NotNil(t, got, name)
		require.Equal(t, wantMetrics, got.Metrics, name)
		require.Equal(t, testComponent, got.AssociatedComponent, name)
		require.NotEmpty(t, got.Message, name)
		require.Equal(t, testDocLink, got.DocumentationLink, name)
		return
	}
	require.Nil(t, got, name)
}
// requireMissingAttrs asserts a missing-attributes entry: nil when no
// attributes are expected, otherwise the expected attribute names, the shared
// component, a non-empty user-facing message, and the test documentation link.
func requireMissingAttrs(t *testing.T, name string, got *inframonitoringtypes.MissingAttributesComponentEntry, wantAttrs []string) {
	t.Helper()
	if len(wantAttrs) > 0 {
		require.NotNil(t, got, name)
		require.Equal(t, wantAttrs, got.Attributes, name)
		require.Equal(t, testComponent, got.AssociatedComponent, name)
		require.NotEmpty(t, got.Message, name)
		require.Equal(t, testDocLink, got.DocumentationLink, name)
		return
	}
	require.Nil(t, got, name)
}

View File

@@ -0,0 +1,160 @@
package implinframonitoring
import (
"context"
"slices"
"time"
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/valuer"
)
// mapPhaseNumToString converts the numeric k8s.pod.phase metric value
// (1=Pending, 2=Running, 3=Succeeded, 4=Failed, 5=Unknown) into its PodPhase.
// Any value outside 1..5 (after truncation to int) maps to PodPhaseNone.
func mapPhaseNumToString(v float64) inframonitoringtypes.PodPhase {
	phases := []inframonitoringtypes.PodPhase{
		inframonitoringtypes.PodPhasePending,
		inframonitoringtypes.PodPhaseRunning,
		inframonitoringtypes.PodPhaseSucceeded,
		inframonitoringtypes.PodPhaseFailed,
		inframonitoringtypes.PodPhaseUnknown,
	}
	if idx := int(v); idx >= 1 && idx <= len(phases) {
		return phases[idx-1]
	}
	return inframonitoringtypes.PodPhaseNone
}
// getTopPodGroups runs only the query (or queries) that back the requested
// order-by column, sorts the resulting groups by the ranking query's value,
// and returns the page selected by req.Offset/req.Limit. metadataMap supplies
// groups known from metadata (see paginateWithBackfill for how they are
// merged into the page).
func (m *module) getTopPodGroups(
	ctx context.Context,
	orgID valuer.UUID,
	req *inframonitoringtypes.PostablePods,
	metadataMap map[string]map[string]string,
) ([]map[string]string, error) {
	orderByKey := req.OrderBy.Key.Name
	queryNamesForOrderBy := orderByToPodsQueryNames[orderByKey]
	// By convention the LAST query name in the list is the one whose values
	// rank the groups.
	rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]
	topReq := &qbtypes.QueryRangeRequest{
		Start:       uint64(req.Start),
		End:         uint64(req.End),
		RequestType: qbtypes.RequestTypeScalar,
		CompositeQuery: qbtypes.CompositeQuery{
			Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
		},
	}
	// Reuse the full pods table query, keeping only the envelopes needed for
	// ranking, and overlay the request's filter and group-by onto each.
	for _, envelope := range m.newPodsTableListQuery().CompositeQuery.Queries {
		if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
			continue
		}
		copied := envelope
		if copied.Type == qbtypes.QueryTypeBuilder {
			existingExpr := ""
			if f := copied.GetFilter(); f != nil {
				existingExpr = f.Expression
			}
			reqFilterExpr := ""
			if req.Filter != nil {
				reqFilterExpr = req.Filter.Expression
			}
			// Merge the caller's filter with the query's built-in filter.
			merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
			copied.SetFilter(&qbtypes.Filter{Expression: merged})
			copied.SetGroupBy(req.GroupBy)
		}
		topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
	}
	resp, err := m.querier.QueryRange(ctx, orgID, topReq)
	if err != nil {
		return nil, err
	}
	allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
	return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
}
// getPodsTableMetadata fetches resource-attribute metadata for the pods table,
// additionally requesting every known pod attribute key that the caller did
// not already include in its group-by.
func (m *module) getPodsTableMetadata(ctx context.Context, req *inframonitoringtypes.PostablePods) (map[string]map[string]string, error) {
	var extraAttrs []string
	for _, attrKey := range podAttrKeysForMetadata {
		if isKeyInGroupByAttrs(req.GroupBy, attrKey) {
			continue
		}
		extraAttrs = append(extraAttrs, attrKey)
	}
	return m.getMetadata(ctx, podsTableMetricNamesList, req.GroupBy, extraAttrs, req.Filter, req.Start, req.End)
}
// buildPodRecords assembles one PodRecord per page group, merging metric
// values from the query response with resource-attribute metadata.
//
// Numeric fields are initialized to -1, meaning "no data" for that metric in
// the requested window. Pod age is derived from k8s.pod.start_time only when
// the pod UID key is part of the group-by: without it a single group may span
// multiple pods, so a single start time would be meaningless.
func buildPodRecords(
	resp *qbtypes.QueryRangeResponse,
	pageGroups []map[string]string,
	groupBy []qbtypes.GroupByKey,
	metadataMap map[string]map[string]string,
	reqEnd int64,
) []inframonitoringtypes.PodRecord {
	metricsMap := parseFullQueryResponse(resp, groupBy)
	records := make([]inframonitoringtypes.PodRecord, 0, len(pageGroups))
	for _, labels := range pageGroups {
		compositeKey := compositeKeyFromLabels(labels, groupBy)
		podUID := labels[podUIDAttrKey]
		record := inframonitoringtypes.PodRecord{ // initialize with default values
			PodUID:           podUID,
			PodPhase:         inframonitoringtypes.PodPhaseNone,
			PodCPU:           -1,
			PodCPURequest:    -1,
			PodCPULimit:      -1,
			PodMemory:        -1,
			PodMemoryRequest: -1,
			PodMemoryLimit:   -1,
			PodAge:           -1,
			Meta:             map[string]any{},
		}
		if metrics, ok := metricsMap[compositeKey]; ok {
			// Query letters match newPodsTableListQuery: A-F are the numeric
			// metric columns, G is the pod phase.
			numericFields := map[string]*float64{
				"A": &record.PodCPU,
				"B": &record.PodCPURequest,
				"C": &record.PodCPULimit,
				"D": &record.PodMemory,
				"E": &record.PodMemoryRequest,
				"F": &record.PodMemoryLimit,
			}
			for queryName, dst := range numericFields {
				if v, exists := metrics[queryName]; exists {
					*dst = v
				}
			}
			if v, exists := metrics["G"]; exists {
				record.PodPhase = mapPhaseNumToString(v)
			}
		}
		if attrs, ok := metadataMap[compositeKey]; ok {
			// BUG FIX: the check previously passed podUID — the label VALUE
			// (an actual UID string) — to isKeyInGroupByAttrs, which compares
			// against group-by KEY names (cf. getPodsTableMetadata). It could
			// therefore never match, so pod age and metadata were never
			// populated. Check the key name instead.
			if isKeyInGroupByAttrs(groupBy, podUIDAttrKey) {
				if startTimeStr, exists := attrs[podStartTimeAttrKey]; exists && startTimeStr != "" {
					if t, err := time.Parse(time.RFC3339, startTimeStr); err == nil {
						if startTimeMs := t.UnixMilli(); startTimeMs > 0 {
							record.PodAge = reqEnd - startTimeMs
						}
					}
				}
			}
			// Metadata attributes are meaningful regardless of whether the
			// pod UID is grouped by, so copy them unconditionally.
			for k, v := range attrs {
				record.Meta[k] = v
			}
		}
		records = append(records, record)
	}
	return records
}

View File

@@ -0,0 +1,194 @@
package implinframonitoring
import (
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)
// Resource-attribute keys used to identify a pod and to derive its age.
const (
	podUIDAttrKey       = "k8s.pod.uid"
	podStartTimeAttrKey = "k8s.pod.start_time"
)

// podUIDGroupByKey groups every pods-table query by the pod UID resource
// attribute so each result row corresponds to a single pod.
var podUIDGroupByKey = qbtypes.GroupByKey{
	TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
		Name:          podUIDAttrKey,
		FieldContext:  telemetrytypes.FieldContextResource,
		FieldDataType: telemetrytypes.FieldDataTypeString,
	},
}
// podsTableMetricNamesList enumerates every metric the pods table queries;
// also passed to getMetadata when fetching pods-table metadata.
var podsTableMetricNamesList = []string{
	"k8s.pod.cpu.usage",
	"k8s.pod.cpu_request_utilization",
	"k8s.pod.cpu_limit_utilization",
	"k8s.pod.memory.working_set",
	"k8s.pod.memory_request_utilization",
	"k8s.pod.memory_limit_utilization",
	"k8s.pod.phase",
}
var podAttrKeysForMetadata = []string{
"k8s.pod.uid",
"k8s.pod.name",
"k8s.namespace.name",
"k8s.node.name",
"k8s.deployment.name",
"k8s.statefulset.name",
"k8s.daemonset.name",
"k8s.job.name",
"k8s.cronjob.name",
"k8s.cluster.name",
"k8s.pod.start_time",
}
// orderByToPodsQueryNames maps each valid order-by key to the query name(s)
// that must run to rank groups; the LAST name in each list is the ranking
// query (see getTopPodGroups). Pod phase has no entry — it is not a
// supported order-by key.
var orderByToPodsQueryNames = map[string][]string{
	inframonitoringtypes.PodsOrderByCPU:           {"A"},
	inframonitoringtypes.PodsOrderByCPURequest:    {"B"},
	inframonitoringtypes.PodsOrderByCPULimit:      {"C"},
	inframonitoringtypes.PodsOrderByMemory:        {"D"},
	inframonitoringtypes.PodsOrderByMemoryRequest: {"E"},
	inframonitoringtypes.PodsOrderByMemoryLimit:   {"F"},
}
func (m *module) newPodsTableListQuery() *qbtypes.QueryRangeRequest {
return &qbtypes.QueryRangeRequest{
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: []qbtypes.QueryEnvelope{
// Query A: CPU usage
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "A",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.cpu.usage",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
// Query B: CPU request utilization
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "B",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.cpu_request_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
// Query C: CPU limit utilization
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "C",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.cpu_limit_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
// Query D: Memory working set
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "D",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.memory.working_set",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
// Query E: Memory request utilization
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "E",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.memory_request_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
// Query F: Memory limit utilization
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "F",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.memory_limit_utilization",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationAvg,
ReduceTo: qbtypes.ReduceToAvg,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
// Query G: Pod phase (latest value per pod)
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "G",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.pod.phase",
TimeAggregation: metrictypes.TimeAggregationLatest,
SpaceAggregation: metrictypes.SpaceAggregationMax, // Space aggregation here should ideally have been something
// like spaceaggregation last to pick up the last seen phase of the pod in case of more than one fingerprint for a pod uid for pod phase.
// but since the chance of that happening is very low and we don't have spaceaggregation last,
// we have picked max which will give us the correct phase in majority of cases and in worst case will give us the most severe phase in case of multiple fingerprints for a pod uid for pod phase.
// TODO(nikhilmantri0902): Add spaceaggregation last once supported and use that here.
ReduceTo: qbtypes.ReduceToLast,
},
},
GroupBy: []qbtypes.GroupByKey{podUIDGroupByKey},
Disabled: false,
},
},
},
},
}
}

View File

@@ -10,8 +10,12 @@ import (
// Handler exposes the infra-monitoring HTTP endpoints.
type Handler interface {
	// ListHosts serves the hosts list API.
	ListHosts(http.ResponseWriter, *http.Request)
	// ListPods serves the pods list API.
	ListPods(http.ResponseWriter, *http.Request)
	// GetOnboarding serves the onboarding readiness check.
	GetOnboarding(http.ResponseWriter, *http.Request)
}

// Module is the business-logic layer behind Handler; each method takes the
// already-validated request type and the caller's organization ID.
type Module interface {
	ListHosts(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableHosts) (*inframonitoringtypes.Hosts, error)
	ListPods(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostablePods) (*inframonitoringtypes.Pods, error)
	GetOnboarding(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableOnboarding) (*inframonitoringtypes.Onboarding, error)
}

View File

@@ -49,7 +49,7 @@ type HostFilter struct {
FilterByStatus HostStatus `json:"filterByStatus"`
}
// Validate ensures HostsListRequest contains acceptable values.
// Validate ensures PostableHosts contains acceptable values.
func (req *PostableHosts) Validate() error {
if req == nil {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")

View File

@@ -0,0 +1,83 @@
package inframonitoringtypes
import (
"slices"
"github.com/SigNoz/signoz/pkg/errors"
)
// PostableOnboarding is the request for GET /api/v2/infra_monitoring/onboarding.
// The single `type` query param selects which infra-monitoring subsection the
// readiness check runs for.
type PostableOnboarding struct {
	// Type must be one of ValidOnboardingTypes; enforced by Validate.
	Type OnboardingType `query:"type" required:"true"`
}
// Validate rejects nil requests and empty or unknown onboarding types.
func (req *PostableOnboarding) Validate() error {
	switch {
	case req == nil:
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
	case req.Type.IsZero():
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "type is required")
	case !slices.Contains(ValidOnboardingTypes, req.Type):
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid type: %s", req.Type)
	default:
		return nil
	}
}
// Onboarding is the response for GET /api/v2/infra_monitoring/onboarding.
//
// The three present/missing pairs partition a type's requirements into three
// dimensions — default-enabled metrics, optional metrics, required attributes —
// each bucketed by the collector component (receiver or processor) that
// produces it. Ready is true iff every Missing* array is empty.
type Onboarding struct {
	// Type echoes the requested onboarding type.
	Type  OnboardingType `json:"type" required:"true"`
	Ready bool           `json:"ready" required:"true"`
	PresentDefaultEnabledMetrics []MetricsComponentEntry           `json:"presentDefaultEnabledMetrics" required:"true"`
	PresentOptionalMetrics       []MetricsComponentEntry           `json:"presentOptionalMetrics" required:"true"`
	PresentRequiredAttributes    []AttributesComponentEntry        `json:"presentRequiredAttributes" required:"true"`
	MissingDefaultEnabledMetrics []MissingMetricsComponentEntry    `json:"missingDefaultEnabledMetrics" required:"true"`
	MissingOptionalMetrics       []MissingMetricsComponentEntry    `json:"missingOptionalMetrics" required:"true"`
	MissingRequiredAttributes    []MissingAttributesComponentEntry `json:"missingRequiredAttributes" required:"true"`
}
// AssociatedComponent identifies the collector receiver or processor that a
// metric or attribute originates from. Name is free-form (e.g. "kubeletstatsreceiver").
type AssociatedComponent struct {
	Type OnboardingComponentType `json:"type" required:"true"`
	Name string                  `json:"name" required:"true"`
}

// MetricsComponentEntry lists metrics that share a single associated component.
type MetricsComponentEntry struct {
	Metrics             []string            `json:"metrics" required:"true"`
	AssociatedComponent AssociatedComponent `json:"associatedComponent" required:"true"`
}

// AttributesComponentEntry lists resource attributes that share a single associated component.
type AttributesComponentEntry struct {
	Attributes          []string            `json:"attributes" required:"true"`
	AssociatedComponent AssociatedComponent `json:"associatedComponent" required:"true"`
}

// MissingMetricsComponentEntry extends MetricsComponentEntry with a user-facing
// message and a docs link for fixing the missing metrics.
type MissingMetricsComponentEntry struct {
	MetricsComponentEntry
	Message           string `json:"message" required:"true"`
	DocumentationLink string `json:"documentationLink" required:"true"`
}

// MissingAttributesComponentEntry extends AttributesComponentEntry with a user-facing
// message and a docs link for fixing the missing attributes.
type MissingAttributesComponentEntry struct {
	AttributesComponentEntry
	Message           string `json:"message" required:"true"`
	DocumentationLink string `json:"documentationLink" required:"true"`
}

View File

@@ -0,0 +1,71 @@
package inframonitoringtypes
import "github.com/SigNoz/signoz/pkg/valuer"
// OnboardingType identifies a single infra-monitoring subsection (UI tab).
// One value per v1/v2 list API we surface in the infra-monitoring section.
type OnboardingType struct {
	valuer.String
}

// One constant per infra-monitoring list view. Keep this set in sync with
// ValidOnboardingTypes and Enum below.
var (
	OnboardingTypeHosts        = OnboardingType{valuer.NewString("hosts")}
	OnboardingTypeProcesses    = OnboardingType{valuer.NewString("processes")}
	OnboardingTypePods         = OnboardingType{valuer.NewString("pods")}
	OnboardingTypeNodes        = OnboardingType{valuer.NewString("nodes")}
	OnboardingTypeDeployments  = OnboardingType{valuer.NewString("deployments")}
	OnboardingTypeDaemonsets   = OnboardingType{valuer.NewString("daemonsets")}
	OnboardingTypeStatefulsets = OnboardingType{valuer.NewString("statefulsets")}
	OnboardingTypeJobs         = OnboardingType{valuer.NewString("jobs")}
	OnboardingTypeNamespaces   = OnboardingType{valuer.NewString("namespaces")}
	OnboardingTypeClusters     = OnboardingType{valuer.NewString("clusters")}
	OnboardingTypeVolumes      = OnboardingType{valuer.NewString("volumes")}
)
// Enum returns every valid OnboardingType. It is derived from
// ValidOnboardingTypes so the two listings cannot drift apart (previously
// both enumerated the constants independently).
func (OnboardingType) Enum() []any {
	enum := make([]any, 0, len(ValidOnboardingTypes))
	for _, onboardingType := range ValidOnboardingTypes {
		enum = append(enum, onboardingType)
	}
	return enum
}
// ValidOnboardingTypes is the canonical ordered list of onboarding types,
// used by PostableOnboarding.Validate to reject unknown values.
var ValidOnboardingTypes = []OnboardingType{
	OnboardingTypeHosts,
	OnboardingTypeProcesses,
	OnboardingTypePods,
	OnboardingTypeNodes,
	OnboardingTypeDeployments,
	OnboardingTypeDaemonsets,
	OnboardingTypeStatefulsets,
	OnboardingTypeJobs,
	OnboardingTypeNamespaces,
	OnboardingTypeClusters,
	OnboardingTypeVolumes,
}
// OnboardingComponentType tags each AssociatedComponent as either a receiver or a processor.
// Only these two values are ever written by the module.
type OnboardingComponentType struct {
	valuer.String
}

var (
	OnboardingComponentTypeReceiver  = OnboardingComponentType{valuer.NewString("receiver")}
	OnboardingComponentTypeProcessor = OnboardingComponentType{valuer.NewString("processor")}
)

// Enum lists both component type values for schema generation.
func (OnboardingComponentType) Enum() []any {
	return []any{
		OnboardingComponentTypeReceiver,
		OnboardingComponentTypeProcessor,
	}
}

View File

@@ -0,0 +1,110 @@
package inframonitoringtypes
import (
"testing"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/stretchr/testify/require"
)
// TestPostableOnboarding_Validate covers the three rejection paths (nil
// request, empty type, unknown type) and explicitly accepts every declared
// onboarding type, so adding a type without extending ValidOnboardingTypes
// surfaces here.
func TestPostableOnboarding_Validate(t *testing.T) {
	tests := []struct {
		name    string
		req     *PostableOnboarding
		wantErr bool
	}{
		{
			name:    "nil request",
			req:     nil,
			wantErr: true,
		},
		{
			name:    "empty type",
			req:     &PostableOnboarding{},
			wantErr: true,
		},
		{
			name:    "unknown type",
			req:     &PostableOnboarding{Type: OnboardingType{valuer.NewString("foo")}},
			wantErr: true,
		},
		{
			name:    "hosts",
			req:     &PostableOnboarding{Type: OnboardingTypeHosts},
			wantErr: false,
		},
		{
			name:    "processes",
			req:     &PostableOnboarding{Type: OnboardingTypeProcesses},
			wantErr: false,
		},
		{
			name:    "pods",
			req:     &PostableOnboarding{Type: OnboardingTypePods},
			wantErr: false,
		},
		{
			name:    "nodes",
			req:     &PostableOnboarding{Type: OnboardingTypeNodes},
			wantErr: false,
		},
		{
			name:    "deployments",
			req:     &PostableOnboarding{Type: OnboardingTypeDeployments},
			wantErr: false,
		},
		{
			name:    "daemonsets",
			req:     &PostableOnboarding{Type: OnboardingTypeDaemonsets},
			wantErr: false,
		},
		{
			name:    "statefulsets",
			req:     &PostableOnboarding{Type: OnboardingTypeStatefulsets},
			wantErr: false,
		},
		{
			name:    "jobs",
			req:     &PostableOnboarding{Type: OnboardingTypeJobs},
			wantErr: false,
		},
		{
			name:    "namespaces",
			req:     &PostableOnboarding{Type: OnboardingTypeNamespaces},
			wantErr: false,
		},
		{
			name:    "clusters",
			req:     &PostableOnboarding{Type: OnboardingTypeClusters},
			wantErr: false,
		},
		{
			name:    "volumes",
			req:     &PostableOnboarding{Type: OnboardingTypeVolumes},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.req.Validate()
			if tt.wantErr {
				require.Error(t, err)
				// All rejections must be typed as invalid-input so handlers
				// can translate them into a 400.
				require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
			} else {
				require.NoError(t, err)
			}
		})
	}
}
// TestValidOnboardingTypes_MatchesEnum ensures ValidOnboardingTypes stays in
// sync with Enum(): same length and same order.
func TestValidOnboardingTypes_MatchesEnum(t *testing.T) {
	enum := OnboardingType{}.Enum()
	require.Equal(t, len(enum), len(ValidOnboardingTypes))
	for i := range enum {
		require.Equal(t, enum[i], ValidOnboardingTypes[i])
	}
}

View File

@@ -0,0 +1,104 @@
package inframonitoringtypes
import (
"encoding/json"
"slices"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)
// Pods is the response for the v2 pods list API.
type Pods struct {
	Type                   ResponseType           `json:"type" required:"true"`
	Records                []PodRecord            `json:"records" required:"true"`
	Total                  int                    `json:"total" required:"true"`
	RequiredMetricsCheck   RequiredMetricsCheck   `json:"requiredMetricsCheck" required:"true"`
	EndTimeBeforeRetention bool                   `json:"endTimeBeforeRetention" required:"true"`
	// Warning is omitted when the query produced no warnings.
	Warning *qbtypes.QueryWarnData `json:"warning,omitempty"`
}
// PodRecord is a single row of the v2 pods list response.
//
// Numeric fields use -1 to signal "no data" for that metric in the requested
// window (the builder initializes every numeric field to -1 and only
// overwrites it when the query returned a value).
type PodRecord struct {
	PodUID           string   `json:"podUID" required:"true"`
	PodCPU           float64  `json:"podCPU" required:"true"`
	PodCPURequest    float64  `json:"podCPURequest" required:"true"`
	PodCPULimit      float64  `json:"podCPULimit" required:"true"`
	PodMemory        float64  `json:"podMemory" required:"true"`
	PodMemoryRequest float64  `json:"podMemoryRequest" required:"true"`
	PodMemoryLimit   float64  `json:"podMemoryLimit" required:"true"`
	PodPhase         PodPhase `json:"podPhase" required:"true"`
	// PodAge is the request end minus the pod start time (-1 when unknown).
	// NOTE(review): assumes request timestamps are unix milliseconds — confirm.
	PodAge int64 `json:"podAge" required:"true"`
	// Meta carries additional resource attributes keyed by attribute name.
	Meta map[string]any `json:"meta" required:"true"`
}
// PostablePods is the request body for the v2 pods list API.
type PostablePods struct {
	// Start/End bound the query window; Validate requires 0 < Start < End.
	Start int64 `json:"start" required:"true"`
	End   int64 `json:"end" required:"true"`
	// Filter, GroupBy, and OrderBy are optional; OrderBy keys are restricted
	// to PodsValidOrderByKeys.
	Filter  *qbtypes.Filter      `json:"filter"`
	GroupBy []qbtypes.GroupByKey `json:"groupBy"`
	OrderBy *qbtypes.OrderBy     `json:"orderBy"`
	// Offset must be >= 0; Limit must be in [1, 5000].
	Offset int `json:"offset"`
	Limit  int `json:"limit" required:"true"`
}
// Validate ensures PostablePods contains acceptable values: a positive,
// correctly ordered time range, a limit in [1, 5000], a non-negative offset,
// and — when order-by is set — a known key and direction.
func (req *PostablePods) Validate() error {
	if req == nil {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
	}
	// invalid wraps every rejection with the shared invalid-input code.
	invalid := func(format string, args ...any) error {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, format, args...)
	}
	if req.Start <= 0 {
		return invalid("invalid start time %d: start must be greater than 0", req.Start)
	}
	if req.End <= 0 {
		return invalid("invalid end time %d: end must be greater than 0", req.End)
	}
	if req.Start >= req.End {
		return invalid("invalid time range: start (%d) must be less than end (%d)", req.Start, req.End)
	}
	if req.Limit < 1 || req.Limit > 5000 {
		return invalid("limit must be between 1 and 5000")
	}
	if req.Offset < 0 {
		return invalid("offset cannot be negative")
	}
	if orderBy := req.OrderBy; orderBy != nil {
		if !slices.Contains(PodsValidOrderByKeys, orderBy.Key.Name) {
			return invalid("invalid order by key: %s", orderBy.Key.Name)
		}
		if orderBy.Direction != qbtypes.OrderDirectionAsc && orderBy.Direction != qbtypes.OrderDirectionDesc {
			return invalid("invalid order by direction: %s", orderBy.Direction)
		}
	}
	return nil
}
// UnmarshalJSON decodes the body and runs Validate immediately, so malformed
// requests are rejected at decode time.
func (req *PostablePods) UnmarshalJSON(data []byte) error {
	// The alias type drops this custom UnmarshalJSON, avoiding recursion.
	type plain PostablePods
	var p plain
	if err := json.Unmarshal(data, &p); err != nil {
		return err
	}
	*req = PostablePods(p)
	return req.Validate()
}

View File

@@ -0,0 +1,45 @@
package inframonitoringtypes
import "github.com/SigNoz/signoz/pkg/valuer"
// PodPhase is a pod's lifecycle phase as derived from the k8s.pod.phase
// metric; PodPhaseNone (empty string) means no phase data was available.
type PodPhase struct {
	valuer.String
}

var (
	PodPhasePending   = PodPhase{valuer.NewString("pending")}
	PodPhaseRunning   = PodPhase{valuer.NewString("running")}
	PodPhaseSucceeded = PodPhase{valuer.NewString("succeeded")}
	PodPhaseFailed    = PodPhase{valuer.NewString("failed")}
	PodPhaseUnknown   = PodPhase{valuer.NewString("unknown")}
	PodPhaseNone      = PodPhase{valuer.NewString("")}
)

// Enum lists every PodPhase value (including the empty "none") for schema
// generation.
func (PodPhase) Enum() []any {
	return []any{
		PodPhasePending,
		PodPhaseRunning,
		PodPhaseSucceeded,
		PodPhaseFailed,
		PodPhaseUnknown,
		PodPhaseNone,
	}
}
// Keys accepted in the pods list order-by clause; each maps to one of the
// numeric metric columns. Pod phase is not an order-by key.
const (
	PodsOrderByCPU           = "cpu"
	PodsOrderByCPURequest    = "cpu_request"
	PodsOrderByCPULimit      = "cpu_limit"
	PodsOrderByMemory        = "memory"
	PodsOrderByMemoryRequest = "memory_request"
	PodsOrderByMemoryLimit   = "memory_limit"
)

// PodsValidOrderByKeys is the allow-list enforced by PostablePods.Validate.
var PodsValidOrderByKeys = []string{
	PodsOrderByCPU,
	PodsOrderByCPURequest,
	PodsOrderByCPULimit,
	PodsOrderByMemory,
	PodsOrderByMemoryRequest,
	PodsOrderByMemoryLimit,
}

View File

@@ -0,0 +1,219 @@
package inframonitoringtypes
import (
"testing"
"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/stretchr/testify/require"
)
// TestPostablePods_Validate is a table-driven test covering PostablePods
// validation: time-range rules (start > 0, end > 0, start < end), pagination
// rules (limit in 1..5000, offset >= 0), and the optional orderBy clause
// (key must be in PodsValidOrderByKeys, direction must be asc/desc).
// Every failure case additionally asserts the error type is InvalidInput.
func TestPostablePods_Validate(t *testing.T) {
tests := []struct {
name string
req *PostablePods
wantErr bool
}{
{
name: "valid request",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
// A nil receiver must be rejected rather than panic.
name: "nil request",
req: nil,
wantErr: true,
},
{
name: "start time zero",
req: &PostablePods{
Start: 0,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time negative",
req: &PostablePods{
Start: -1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "end time zero",
req: &PostablePods{
Start: 1000,
End: 0,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time greater than end time",
req: &PostablePods{
Start: 2000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
// Boundary: an empty time range (start == end) is also invalid.
name: "start time equal to end time",
req: &PostablePods{
Start: 1000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "limit zero",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 0,
Offset: 0,
},
wantErr: true,
},
{
name: "limit negative",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: -10,
Offset: 0,
},
wantErr: true,
},
{
// 5001 is one past the maximum accepted limit of 5000.
name: "limit exceeds max",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 5001,
Offset: 0,
},
wantErr: true,
},
{
name: "offset negative",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: -5,
},
wantErr: true,
},
{
// orderBy is optional: omitting it must not fail validation.
name: "orderBy nil is valid",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "orderBy with valid key cpu and direction asc",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: PodsOrderByCPU,
},
},
Direction: qbtypes.OrderDirectionAsc,
},
},
wantErr: false,
},
{
// "phase" was removed from the order-by allow-list, so it must be rejected.
name: "orderBy with phase key is invalid",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "phase",
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: true,
},
{
name: "orderBy with invalid key",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "unknown",
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: true,
},
{
// Direction must be exactly asc or desc; any other valuer string fails.
name: "orderBy with valid key but invalid direction",
req: &PostablePods{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: PodsOrderByMemory,
},
},
Direction: qbtypes.OrderDirection{String: valuer.NewString("invalid")},
},
},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.req.Validate()
if tt.wantErr {
require.Error(t, err)
// All validation failures must surface as InvalidInput errors.
require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
} else {
require.NoError(t, err)
}
})
}
}