mirror of https://github.com/SigNoz/signoz.git (synced 2026-05-15 14:40:30 +01:00)

Compare commits (5 commits): chore/tool ... fix/downti
| Author | SHA1 | Date |
|---|---|---|
| | 99bdee8ee3 | |
| | d2f3659df2 | |
| | 1e30158034 | |
| | 72a58c634b | |
| | e2583b135c | |
cmd/authz.go (53 lines changed)

@@ -5,6 +5,7 @@ import (
 	"context"
 	"os"
 	"sort"
+	"strings"
 	"text/template"

 	"github.com/SigNoz/signoz/pkg/types/coretypes"

@@ -23,6 +24,7 @@ export default {
 	{
 		kind: '{{ .Kind }}',
 		type: '{{ .Type }}',
+		{{ .FormattedAllowedVerbs }}
 	},
 {{- end }}
 ],

@@ -41,8 +43,9 @@ type permissionsTypeRelation struct {
 }

 type permissionsTypeResource struct {
-	Kind string
-	Type string
+	Kind                  string
+	Type                  string
+	FormattedAllowedVerbs string
 }

 type permissionsTypeData struct {

@@ -50,6 +53,30 @@ type permissionsTypeData struct {
 	Relations []permissionsTypeRelation
 }

+// formatAllowedVerbs returns a prettier-compatible formatted allowedVerbs line.
+// indentLevel is the number of tabs for the property (matching kind/type indent).
+// printWidth is prettier's printWidth; tabWidth is assumed to be 1 (each \t = 1 char).
+func formatAllowedVerbs(verbs []string, indentLevel int, printWidth int) string {
+	quoted := make([]string, len(verbs))
+	for i, v := range verbs {
+		quoted[i] = "'" + v + "'"
+	}
+	indent := strings.Repeat("\t", indentLevel)
+
+	oneLine := indent + "allowedVerbs: [" + strings.Join(quoted, ", ") + "],"
+	if len(oneLine) <= printWidth {
+		return oneLine
+	}
+
+	var b strings.Builder
+	b.WriteString(indent + "allowedVerbs: [\n")
+	for _, q := range quoted {
+		b.WriteString(indent + "\t" + q + ",\n")
+	}
+	b.WriteString(indent + "],")
+	return b.String()
+}
+
 func registerGenerateAuthz(parentCmd *cobra.Command) {
 	authzCmd := &cobra.Command{
 		Use: "authz",

@@ -66,8 +93,8 @@ func runGenerateAuthz(_ context.Context) error {
 	registry := coretypes.NewRegistry()

 	allowedResources := map[string]bool{
-		coretypes.NewResourceRef(coretypes.ResourceServiceAccount).String(): true,
-		coretypes.NewResourceRef(coretypes.ResourceRole).String():           true,
+		coretypes.NewResourceRef(coretypes.ResourceServiceAccount).String():           true,
+		coretypes.NewResourceRef(coretypes.ResourceRole).String():                     true,
+		coretypes.NewResourceRef(coretypes.ResourceMetaResourceFactorAPIKey).String(): true,
 	}

@@ -80,9 +107,23 @@ func runGenerateAuthz(_ context.Context) error {
 			continue
 		}
 		allowedTypes[ref.Type.StringValue()] = true

+		resource, err := coretypes.NewResourceFromTypeAndKind(ref.Type, ref.Kind)
+		if err != nil {
+			return err
+		}
+
+		verbs := resource.AllowedVerbs()
+		allowedVerbStrings := make([]string, 0, len(verbs))
+		for _, verb := range verbs {
+			allowedVerbStrings = append(allowedVerbStrings, verb.StringValue())
+		}
+		sort.Strings(allowedVerbStrings)
+
 		resources = append(resources, permissionsTypeResource{
-			Kind: ref.Kind.String(),
-			Type: ref.Type.StringValue(),
+			Kind:                  ref.Kind.String(),
+			Type:                  ref.Type.StringValue(),
+			FormattedAllowedVerbs: formatAllowedVerbs(allowedVerbStrings, 4, 80),
 		})
 	}
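For context, this is roughly what one generated entry looks like with the new field in place. A minimal sketch: the kind/type/allowedVerbs shape comes from the template above, while the surrounding values and verb names are illustrative assumptions.

// Hypothetical generated entries (TypeScript). With printWidth 80, a short
// verb list stays on one line:
{
	kind: 'role',
	type: 'role',
	allowedVerbs: ['create', 'delete', 'get', 'list', 'update'],
},
// A list that would exceed printWidth wraps one quoted verb per line,
// matching prettier's array formatting (verb names here are hypothetical):
{
	kind: 'serviceaccount',
	type: 'serviceaccount',
	allowedVerbs: [
		'create',
		'delete',
		'get',
		'getFactorAPIKeys',
		'list',
		'update',
	],
},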
@@ -190,7 +190,7 @@ services:
 #        - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.122.0
+    image: signoz/signoz:v0.124.0
     ports:
       - "8080:8080" # signoz port
 #      - "6060:6060" # pprof port

@@ -117,7 +117,7 @@ services:
 #        - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.122.0
+    image: signoz/signoz:v0.124.0
     ports:
       - "8080:8080" # signoz port
     volumes:

@@ -181,7 +181,7 @@ services:
 #        - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.122.0}
+    image: signoz/signoz:${VERSION:-v0.124.0}
     container_name: signoz
     ports:
       - "8080:8080" # signoz port

@@ -109,7 +109,7 @@ services:
 #        - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.122.0}
+    image: signoz/signoz:${VERSION:-v0.124.0}
     container_name: signoz
     ports:
       - "8080:8080" # signoz port
@@ -2580,6 +2580,76 @@ components:
         - requiredMetricsCheck
         - endTimeBeforeRetention
       type: object
+    InframonitoringtypesDaemonSetRecord:
+      properties:
+        currentNodes:
+          type: integer
+        daemonSetCPU:
+          format: double
+          type: number
+        daemonSetCPULimit:
+          format: double
+          type: number
+        daemonSetCPURequest:
+          format: double
+          type: number
+        daemonSetMemory:
+          format: double
+          type: number
+        daemonSetMemoryLimit:
+          format: double
+          type: number
+        daemonSetMemoryRequest:
+          format: double
+          type: number
+        daemonSetName:
+          type: string
+        desiredNodes:
+          type: integer
+        meta:
+          additionalProperties:
+            type: string
+          nullable: true
+          type: object
+        podCountsByPhase:
+          $ref: '#/components/schemas/InframonitoringtypesPodCountsByPhase'
+      required:
+        - daemonSetName
+        - daemonSetCPU
+        - daemonSetCPURequest
+        - daemonSetCPULimit
+        - daemonSetMemory
+        - daemonSetMemoryRequest
+        - daemonSetMemoryLimit
+        - desiredNodes
+        - currentNodes
+        - podCountsByPhase
+        - meta
+      type: object
+    InframonitoringtypesDaemonSets:
+      properties:
+        endTimeBeforeRetention:
+          type: boolean
+        records:
+          items:
+            $ref: '#/components/schemas/InframonitoringtypesDaemonSetRecord'
+          nullable: true
+          type: array
+        requiredMetricsCheck:
+          $ref: '#/components/schemas/InframonitoringtypesRequiredMetricsCheck'
+        total:
+          type: integer
+        type:
+          $ref: '#/components/schemas/InframonitoringtypesResponseType'
+        warning:
+          $ref: '#/components/schemas/Querybuildertypesv5QueryWarnData'
+      required:
+        - type
+        - records
+        - total
+        - requiredMetricsCheck
+        - endTimeBeforeRetention
+      type: object
     InframonitoringtypesDeploymentRecord:
       properties:
         availablePods:
@@ -3056,6 +3126,32 @@ components:
         - end
        - limit
       type: object
+    InframonitoringtypesPostableDaemonSets:
+      properties:
+        end:
+          format: int64
+          type: integer
+        filter:
+          $ref: '#/components/schemas/Querybuildertypesv5Filter'
+        groupBy:
+          items:
+            $ref: '#/components/schemas/Querybuildertypesv5GroupByKey'
+          nullable: true
+          type: array
+        limit:
+          type: integer
+        offset:
+          type: integer
+        orderBy:
+          $ref: '#/components/schemas/Querybuildertypesv5OrderBy'
+        start:
+          format: int64
+          type: integer
+      required:
+        - start
+        - end
+        - limit
+      type: object
     InframonitoringtypesPostableDeployments:
       properties:
         end:
@@ -12275,6 +12371,83 @@ paths:
       summary: List Clusters for Infra Monitoring
       tags:
         - inframonitoring
+  /api/v2/infra_monitoring/daemonsets:
+    post:
+      deprecated: false
+      description: 'Returns a paginated list of Kubernetes DaemonSets with key aggregated
+        pod metrics: CPU usage and memory working set summed across pods owned by
+        the daemonset, plus average CPU/memory request and limit utilization (daemonSetCPURequest,
+        daemonSetCPULimit, daemonSetMemoryRequest, daemonSetMemoryLimit). Each row
+        also reports the latest known node-level counters from kube-state-metrics:
+        desiredNodes (k8s.daemonset.desired_scheduled_nodes, the number of nodes the
+        daemonset wants to run on) and currentNodes (k8s.daemonset.current_scheduled_nodes,
+        the number of nodes the daemonset currently runs on) — note these are node
+        counts, not pod counts. It also reports per-group podCountsByPhase ({ pending,
+        running, succeeded, failed, unknown } from each pod''s latest k8s.pod.phase
+        value). Each daemonset includes metadata attributes (k8s.daemonset.name, k8s.namespace.name,
+        k8s.cluster.name). The response type is ''list'' for the default k8s.daemonset.name
+        grouping or ''grouped_list'' for custom groupBy keys; in both modes every
+        row aggregates pods owned by daemonsets in the group. Supports filtering via
+        a filter expression, custom groupBy, ordering by cpu / cpu_request / cpu_limit
+        / memory / memory_request / memory_limit / desired_nodes / current_nodes,
+        and pagination via offset/limit. Also reports missing required metrics and
+        whether the requested time range falls before the data retention boundary.
+        Numeric metric fields (daemonSetCPU, daemonSetCPURequest, daemonSetCPULimit,
+        daemonSetMemory, daemonSetMemoryRequest, daemonSetMemoryLimit, desiredNodes,
+        currentNodes) return -1 as a sentinel when no data is available for that field.'
+      operationId: ListDaemonSets
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/InframonitoringtypesPostableDaemonSets'
+      responses:
+        "200":
+          content:
+            application/json:
+              schema:
+                properties:
+                  data:
+                    $ref: '#/components/schemas/InframonitoringtypesDaemonSets'
+                  status:
+                    type: string
+                required:
+                  - status
+                  - data
+                type: object
+          description: OK
+        "400":
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/RenderErrorResponse'
+          description: Bad Request
+        "401":
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/RenderErrorResponse'
+          description: Unauthorized
+        "403":
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/RenderErrorResponse'
+          description: Forbidden
+        "500":
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/RenderErrorResponse'
+          description: Internal Server Error
+      security:
+        - api_key:
+            - VIEWER
+        - tokenizer:
+            - VIEWER
+      summary: List DaemonSets for Infra Monitoring
+      tags:
+        - inframonitoring
   /api/v2/infra_monitoring/deployments:
     post:
       deprecated: false
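A minimal request sketch against the new endpoint, using only the fields the PostableDaemonSets schema marks required; the epoch-millisecond interpretation of the int64 start/end values is an assumption.

// Sketch of a ListDaemonSets call (TypeScript). filter, groupBy, orderBy,
// and offset are optional per the schema above and omitted here.
const res = await fetch('/api/v2/infra_monitoring/daemonsets', {
	method: 'POST',
	headers: { 'Content-Type': 'application/json' },
	body: JSON.stringify({
		start: Date.now() - 15 * 60 * 1000, // assumed: epoch milliseconds
		end: Date.now(),
		limit: 10,
	}),
});
const { status, data } = await res.json(); // data matches InframonitoringtypesDaemonSets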
@@ -54,6 +54,9 @@ type metaresource
     define update: [user, serviceaccount, role#assignee]
     define delete: [user, serviceaccount, role#assignee]

+    define attach: [user, serviceaccount, role#assignee]
+    define detach: [user, serviceaccount, role#assignee]

+    define block: [user, serviceaccount, role#assignee]
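The model file is OpenFGA DSL, so the new relations can be exercised with an ordinary relationship check; a sketch assuming the standard @openfga/sdk client, which may differ from how this repo actually wires authorization. All identifiers below are placeholders.

import { OpenFgaClient } from '@openfga/sdk';

// Placeholder apiUrl and storeId; 'user:anne' and 'metaresource:api-key-1'
// are illustrative identifiers, not values from this PR.
const fga = new OpenFgaClient({
	apiUrl: 'http://localhost:8081',
	storeId: '<store-id>',
});

const { allowed } = await fga.check({
	user: 'user:anne',
	relation: 'attach',
	object: 'metaresource:api-key-1',
});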
@@ -27,7 +27,7 @@ export default defineConfig({
       signal: true,
       useOperationIdAsQueryKey: false,
     },
-    useDates: true,
+    useDates: false,
     useNamedParameters: true,
     enumGenerationType: 'enum',
     mutator: {
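Turning useDates off makes orval emit date-time fields as plain ISO 8601 strings rather than Date instances, which is what the DTO changes further down reflect; consumers parse only where they actually need a date. A minimal sketch, assuming dayjs as used elsewhere in this PR:

import dayjs from 'dayjs';

// createdAt now arrives exactly as serialized in the JSON payload,
// e.g. '2024-01-01T00:00:00Z'; convert at the point of display.
function formatCreatedAt(createdAt?: string): string {
	return createdAt ? dayjs(createdAt).format('YYYY-MM-DD HH:mm') : '-';
}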
@@ -14,6 +14,7 @@ import type {

 import type {
 	InframonitoringtypesPostableClustersDTO,
+	InframonitoringtypesPostableDaemonSetsDTO,
 	InframonitoringtypesPostableDeploymentsDTO,
 	InframonitoringtypesPostableHostsDTO,
 	InframonitoringtypesPostableJobsDTO,

@@ -23,6 +24,7 @@ import type {
 	InframonitoringtypesPostableStatefulSetsDTO,
 	InframonitoringtypesPostableVolumesDTO,
 	ListClusters200,
+	ListDaemonSets200,
 	ListDeployments200,
 	ListHosts200,
 	ListJobs200,

@@ -120,6 +122,89 @@ export const useListClusters = <
 > => {
 	return useMutation(getListClustersMutationOptions(options));
 };
+/**
+ * Returns a paginated list of Kubernetes DaemonSets with key aggregated pod metrics: CPU usage and memory working set summed across pods owned by the daemonset, plus average CPU/memory request and limit utilization (daemonSetCPURequest, daemonSetCPULimit, daemonSetMemoryRequest, daemonSetMemoryLimit). Each row also reports the latest known node-level counters from kube-state-metrics: desiredNodes (k8s.daemonset.desired_scheduled_nodes, the number of nodes the daemonset wants to run on) and currentNodes (k8s.daemonset.current_scheduled_nodes, the number of nodes the daemonset currently runs on) — note these are node counts, not pod counts. It also reports per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value). Each daemonset includes metadata attributes (k8s.daemonset.name, k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.daemonset.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods owned by daemonsets in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_request / cpu_limit / memory / memory_request / memory_limit / desired_nodes / current_nodes, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (daemonSetCPU, daemonSetCPURequest, daemonSetCPULimit, daemonSetMemory, daemonSetMemoryRequest, daemonSetMemoryLimit, desiredNodes, currentNodes) return -1 as a sentinel when no data is available for that field.
+ * @summary List DaemonSets for Infra Monitoring
+ */
+export const listDaemonSets = (
+	inframonitoringtypesPostableDaemonSetsDTO?: BodyType<InframonitoringtypesPostableDaemonSetsDTO>,
+	signal?: AbortSignal,
+) => {
+	return GeneratedAPIInstance<ListDaemonSets200>({
+		url: `/api/v2/infra_monitoring/daemonsets`,
+		method: 'POST',
+		headers: { 'Content-Type': 'application/json' },
+		data: inframonitoringtypesPostableDaemonSetsDTO,
+		signal,
+	});
+};
+
+export const getListDaemonSetsMutationOptions = <
+	TError = ErrorType<RenderErrorResponseDTO>,
+	TContext = unknown,
+>(options?: {
+	mutation?: UseMutationOptions<
+		Awaited<ReturnType<typeof listDaemonSets>>,
+		TError,
+		{ data?: BodyType<InframonitoringtypesPostableDaemonSetsDTO> },
+		TContext
+	>;
+}): UseMutationOptions<
+	Awaited<ReturnType<typeof listDaemonSets>>,
+	TError,
+	{ data?: BodyType<InframonitoringtypesPostableDaemonSetsDTO> },
+	TContext
+> => {
+	const mutationKey = ['listDaemonSets'];
+	const { mutation: mutationOptions } = options
+		? options.mutation &&
+		  'mutationKey' in options.mutation &&
+		  options.mutation.mutationKey
+			? options
+			: { ...options, mutation: { ...options.mutation, mutationKey } }
+		: { mutation: { mutationKey } };
+
+	const mutationFn: MutationFunction<
+		Awaited<ReturnType<typeof listDaemonSets>>,
+		{ data?: BodyType<InframonitoringtypesPostableDaemonSetsDTO> }
+	> = (props) => {
+		const { data } = props ?? {};
+
+		return listDaemonSets(data);
+	};
+
+	return { mutationFn, ...mutationOptions };
+};
+
+export type ListDaemonSetsMutationResult = NonNullable<
+	Awaited<ReturnType<typeof listDaemonSets>>
+>;
+export type ListDaemonSetsMutationBody =
+	| BodyType<InframonitoringtypesPostableDaemonSetsDTO>
+	| undefined;
+export type ListDaemonSetsMutationError = ErrorType<RenderErrorResponseDTO>;
+
+/**
+ * @summary List DaemonSets for Infra Monitoring
+ */
+export const useListDaemonSets = <
+	TError = ErrorType<RenderErrorResponseDTO>,
+	TContext = unknown,
+>(options?: {
+	mutation?: UseMutationOptions<
+		Awaited<ReturnType<typeof listDaemonSets>>,
+		TError,
+		{ data?: BodyType<InframonitoringtypesPostableDaemonSetsDTO> },
+		TContext
+	>;
+}): UseMutationResult<
+	Awaited<ReturnType<typeof listDaemonSets>>,
+	TError,
+	{ data?: BodyType<InframonitoringtypesPostableDaemonSetsDTO> },
+	TContext
+> => {
+	return useMutation(getListDaemonSetsMutationOptions(options));
+};
 /**
  * Returns a paginated list of Kubernetes Deployments with key aggregated pod metrics: CPU usage and memory working set summed across pods owned by the deployment, plus average CPU/memory request and limit utilization (deploymentCPURequest, deploymentCPULimit, deploymentMemoryRequest, deploymentMemoryLimit). Each row also reports the latest known desiredPods (k8s.deployment.desired) and availablePods (k8s.deployment.available) replica counts and per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value). Each deployment includes metadata attributes (k8s.deployment.name, k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.deployment.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods owned by deployments in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_request / cpu_limit / memory / memory_request / memory_limit / desired_pods / available_pods, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (deploymentCPU, deploymentCPURequest, deploymentCPULimit, deploymentMemory, deploymentMemoryRequest, deploymentMemoryLimit, desiredPods, availablePods) return -1 as a sentinel when no data is available for that field.
  * @summary List Deployments for Infra Monitoring
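A usage sketch for the generated mutation hook; the import path, time range, and the react-query flag name (isLoading here, isPending in react-query v5) are assumptions.

import { useListDaemonSets } from './inframonitoring'; // path assumed

function useDaemonSetNames() {
	const listDaemonSets = useListDaemonSets();

	const load = (): void =>
		listDaemonSets.mutate(
			// start/end assumed to be epoch milliseconds for the last 15 minutes
			{ data: { start: Date.now() - 15 * 60 * 1000, end: Date.now(), limit: 10 } },
			{
				onSuccess: (res) => {
					// records is InframonitoringtypesDaemonSetRecordDTO[] | null
					console.log(res.data.records?.map((r) => r.daemonSetName));
				},
			},
		);

	return { load, isLoading: listDaemonSets.isLoading };
}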
@@ -10,7 +10,7 @@ export interface AlertmanagertypesChannelDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -35,7 +35,7 @@ export interface AlertmanagertypesChannelDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface ModelLabelSetDTO {

@@ -63,7 +63,7 @@ export interface AlertmanagertypesDeprecatedGettableAlertDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	endsAt?: Date;
+	endsAt?: string;
 	/**
 	 * @type string
 	 */

@@ -81,7 +81,7 @@ export interface AlertmanagertypesDeprecatedGettableAlertDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	startsAt?: Date;
+	startsAt?: string;
 	status?: TypesAlertStatusDTO;
 }

@@ -98,7 +98,7 @@ export interface AlertmanagertypesGettableRoutePolicyDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt: Date;
+	createdAt: string;
 	/**
 	 * @type string,null
 	 */

@@ -128,7 +128,7 @@ export interface AlertmanagertypesGettableRoutePolicyDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt: Date;
+	updatedAt: string;
 	/**
 	 * @type string,null
 	 */

@@ -1835,7 +1835,7 @@ export interface AuthtypesGettableAuthDomainDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -1852,7 +1852,7 @@ export interface AuthtypesGettableAuthDomainDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface AuthtypesGettableTokenDTO {

@@ -2010,7 +2010,7 @@ export interface AuthtypesRoleDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -2035,7 +2035,7 @@ export interface AuthtypesRoleDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface AuthtypesSessionContextDTO {

@@ -2063,7 +2063,7 @@ export interface AuthtypesUserRoleDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt: Date;
+	createdAt: string;
 	/**
 	 * @type string
 	 */

@@ -2077,7 +2077,7 @@ export interface AuthtypesUserRoleDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt: Date;
+	updatedAt: string;
 	/**
 	 * @type string
 	 */

@@ -2089,7 +2089,7 @@ export interface AuthtypesUserWithRolesDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -2118,7 +2118,7 @@ export interface AuthtypesUserWithRolesDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 	/**
 	 * @type array,null
 	 */

@@ -2285,7 +2285,7 @@ export interface CloudintegrationtypesAccountDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -2306,12 +2306,12 @@ export interface CloudintegrationtypesAccountDTO {
 	 * @type string,null
 	 * @format date-time
 	 */
-	removedAt: Date | null;
+	removedAt: string | null;
 	/**
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface DashboardtypesStorableDashboardDataDTO {

@@ -2442,7 +2442,7 @@ export type CloudintegrationtypesCloudIntegrationServiceDTOAnyOf = {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -2452,7 +2452,7 @@ export type CloudintegrationtypesCloudIntegrationServiceDTOAnyOf = {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 };

 /**

@@ -2646,12 +2646,12 @@ export interface CloudintegrationtypesGettableAgentCheckInDTO {
 	 * @type string,null
 	 * @format date-time
 	 */
-	removed_at: Date | null;
+	removed_at: string | null;
 	/**
 	 * @type string,null
 	 * @format date-time
 	 */
-	removedAt: Date | null;
+	removedAt: string | null;
 }

 export interface CloudintegrationtypesServiceMetadataDTO {

@@ -2886,7 +2886,7 @@ export interface DashboardtypesDashboardDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -2908,7 +2908,7 @@ export interface DashboardtypesDashboardDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 	/**
 	 * @type string
 	 */

@@ -3090,7 +3090,7 @@ export interface GatewaytypesLimitDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	created_at?: Date;
+	created_at?: string;
 	/**
 	 * @type string
 	 */

@@ -3112,7 +3112,7 @@ export interface GatewaytypesLimitDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updated_at?: Date;
+	updated_at?: string;
 }

 export interface GatewaytypesIngestionKeyDTO {

@@ -3120,12 +3120,12 @@ export interface GatewaytypesIngestionKeyDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	created_at?: Date;
+	created_at?: string;
 	/**
 	 * @type string
 	 * @format date-time
 	 */
-	expires_at?: Date;
+	expires_at?: string;
 	/**
 	 * @type string
 	 */

@@ -3146,7 +3146,7 @@ export interface GatewaytypesIngestionKeyDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updated_at?: Date;
+	updated_at?: string;
 	/**
 	 * @type string
 	 */

@@ -3170,7 +3170,7 @@ export interface GatewaytypesPostableIngestionKeyDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	expires_at?: Date;
+	expires_at?: string;
 	/**
 	 * @type string
 	 */

@@ -3376,6 +3376,84 @@ export interface InframonitoringtypesClustersDTO {
 	warning?: Querybuildertypesv5QueryWarnDataDTO;
 }

+export type InframonitoringtypesDaemonSetRecordDTOMetaAnyOf = {
+	[key: string]: string;
+};
+
+/**
+ * @nullable
+ */
+export type InframonitoringtypesDaemonSetRecordDTOMeta =
+	InframonitoringtypesDaemonSetRecordDTOMetaAnyOf | null;
+
+export interface InframonitoringtypesDaemonSetRecordDTO {
+	/**
+	 * @type integer
+	 */
+	currentNodes: number;
+	/**
+	 * @type number
+	 * @format double
+	 */
+	daemonSetCPU: number;
+	/**
+	 * @type number
+	 * @format double
+	 */
+	daemonSetCPULimit: number;
+	/**
+	 * @type number
+	 * @format double
+	 */
+	daemonSetCPURequest: number;
+	/**
+	 * @type number
+	 * @format double
+	 */
+	daemonSetMemory: number;
+	/**
+	 * @type number
+	 * @format double
+	 */
+	daemonSetMemoryLimit: number;
+	/**
+	 * @type number
+	 * @format double
+	 */
+	daemonSetMemoryRequest: number;
+	/**
+	 * @type string
+	 */
+	daemonSetName: string;
+	/**
+	 * @type integer
+	 */
+	desiredNodes: number;
+	/**
+	 * @type object,null
+	 */
+	meta: InframonitoringtypesDaemonSetRecordDTOMeta;
+	podCountsByPhase: InframonitoringtypesPodCountsByPhaseDTO;
+}
+
+export interface InframonitoringtypesDaemonSetsDTO {
+	/**
+	 * @type boolean
+	 */
+	endTimeBeforeRetention: boolean;
+	/**
+	 * @type array,null
+	 */
+	records: InframonitoringtypesDaemonSetRecordDTO[] | null;
+	requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
+	/**
+	 * @type integer
+	 */
+	total: number;
+	type: InframonitoringtypesResponseTypeDTO;
+	warning?: Querybuildertypesv5QueryWarnDataDTO;
+}
+
 export type InframonitoringtypesDeploymentRecordDTOMetaAnyOf = {
 	[key: string]: string;
 };

@@ -3926,6 +4004,33 @@ export interface InframonitoringtypesPostableClustersDTO {
 	start: number;
 }

+export interface InframonitoringtypesPostableDaemonSetsDTO {
+	/**
+	 * @type integer
+	 * @format int64
+	 */
+	end: number;
+	filter?: Querybuildertypesv5FilterDTO;
+	/**
+	 * @type array,null
+	 */
+	groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
+	/**
+	 * @type integer
+	 */
+	limit: number;
+	/**
+	 * @type integer
+	 */
+	offset?: number;
+	orderBy?: Querybuildertypesv5OrderByDTO;
+	/**
+	 * @type integer
+	 * @format int64
+	 */
+	start: number;
+}
+
 export interface InframonitoringtypesPostableDeploymentsDTO {
 	/**
 	 * @type integer

@@ -4335,7 +4440,7 @@ export interface LlmpricingruletypesLLMPricingRuleDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -4374,13 +4479,13 @@ export interface LlmpricingruletypesLLMPricingRuleDTO {
 	 * @type string,null
 	 * @format date-time
 	 */
-	syncedAt?: Date | null;
+	syncedAt?: string | null;
 	unit: LlmpricingruletypesLLMPricingRuleUnitDTO;
 	/**
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 	/**
 	 * @type string
 	 */

@@ -5606,7 +5711,7 @@ export interface Querybuildertypesv5RawRowDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	timestamp?: Date;
+	timestamp?: string;
 }

 export interface Querybuildertypesv5RawDataDTO {

@@ -6075,7 +6180,7 @@ export interface RuletypesRecurrenceDTO {
 	 * @type string,null
 	 * @format date-time
 	 */
-	endTime?: Date | null;
+	endTime?: string | null;
 	/**
 	 * @type array,null
 	 */

@@ -6085,7 +6190,7 @@ export interface RuletypesRecurrenceDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	startTime: Date;
+	startTime: string;
 }

 export interface RuletypesScheduleDTO {

@@ -6093,13 +6198,13 @@ export interface RuletypesScheduleDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	endTime?: Date;
+	endTime?: string;
 	recurrence?: RuletypesRecurrenceDTO;
 	/**
 	 * @type string
 	 * @format date-time
 	 */
-	startTime?: Date;
+	startTime?: string;
 	/**
 	 * @type string
 	 */

@@ -6115,7 +6220,7 @@ export interface RuletypesPlannedMaintenanceDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -6139,7 +6244,7 @@ export interface RuletypesPlannedMaintenanceDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 	/**
 	 * @type string
 	 */

@@ -6302,7 +6407,7 @@ export interface RuletypesRuleDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -6351,7 +6456,7 @@ export interface RuletypesRuleDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 	/**
 	 * @type string
 	 */

@@ -6370,7 +6475,7 @@ export interface ServiceaccounttypesGettableFactorAPIKeyDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type integer
 	 * @minimum 0

@@ -6384,7 +6489,7 @@ export interface ServiceaccounttypesGettableFactorAPIKeyDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	lastObservedAt: Date;
+	lastObservedAt: string;
 	/**
 	 * @type string
 	 */

@@ -6397,7 +6502,7 @@ export interface ServiceaccounttypesGettableFactorAPIKeyDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface ServiceaccounttypesGettableFactorAPIKeyWithKeyDTO {

@@ -6442,7 +6547,7 @@ export interface ServiceaccounttypesServiceAccountDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -6467,7 +6572,7 @@ export interface ServiceaccounttypesServiceAccountDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface ServiceaccounttypesServiceAccountRoleDTO {

@@ -6475,7 +6580,7 @@ export interface ServiceaccounttypesServiceAccountRoleDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -6493,7 +6598,7 @@ export interface ServiceaccounttypesServiceAccountRoleDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface ServiceaccounttypesServiceAccountWithRolesDTO {

@@ -6501,7 +6606,7 @@ export interface ServiceaccounttypesServiceAccountWithRolesDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -6530,7 +6635,7 @@ export interface ServiceaccounttypesServiceAccountWithRolesDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface ServiceaccounttypesUpdatableFactorAPIKeyDTO {

@@ -6572,7 +6677,7 @@ export interface SpantypesSpanMapperGroupDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -6597,7 +6702,7 @@ export interface SpantypesSpanMapperGroupDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 	/**
 	 * @type string
 	 */

@@ -6666,7 +6771,7 @@ export interface SpantypesSpanMapperDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -6692,7 +6797,7 @@ export interface SpantypesSpanMapperDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 	/**
 	 * @type string
 	 */

@@ -7059,7 +7164,7 @@ export interface TypesDeprecatedUserDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -7092,7 +7197,7 @@ export interface TypesDeprecatedUserDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface TypesIdentifiableDTO {

@@ -7107,7 +7212,7 @@ export interface TypesInviteDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -7140,7 +7245,7 @@ export interface TypesInviteDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface TypesOrganizationDTO {

@@ -7152,7 +7257,7 @@ export interface TypesOrganizationDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -7174,7 +7279,7 @@ export interface TypesOrganizationDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface TypesPostableInviteDTO {

@@ -7241,7 +7346,7 @@ export interface TypesResetPasswordTokenDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	expiresAt?: Date;
+	expiresAt?: string;
 	/**
 	 * @type string
 	 */

@@ -7268,7 +7373,7 @@ export interface TypesUserDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	createdAt?: Date;
+	createdAt?: string;
 	/**
 	 * @type string
 	 */

@@ -7297,7 +7402,7 @@ export interface TypesUserDTO {
 	 * @type string
 	 * @format date-time
 	 */
-	updatedAt?: Date;
+	updatedAt?: string;
 }

 export interface ZeustypesHostDTO {

@@ -8430,6 +8535,14 @@ export type ListClusters200 = {
 	status: string;
 };

+export type ListDaemonSets200 = {
+	data: InframonitoringtypesDaemonSetsDTO;
+	/**
+	 * @type string
+	 */
+	status: string;
+};
+
 export type ListDeployments200 = {
 	data: InframonitoringtypesDeploymentsDTO;
 	/**
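One practical consequence of the Date-to-string switch: ISO 8601 strings that share a common UTC offset sort lexicographically in chronological order, so ordering code can compare the raw strings without parsing. A sketch:

// Sort DTOs by their ISO 8601 createdAt strings; lexicographic order matches
// chronological order when all values share one offset (e.g. 'Z'-suffixed UTC).
interface HasCreatedAt {
	createdAt?: string;
}

function sortByCreatedAt<T extends HasCreatedAt>(rows: T[]): T[] {
	return [...rows].sort((a, b) =>
		(a.createdAt ?? '').localeCompare(b.createdAt ?? ''),
	);
}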
@@ -59,7 +59,7 @@ function getDeleteTooltip(

 function getInviteButtonLabel(
 	isLoading: boolean,
-	existingToken: { expiresAt?: Date } | undefined,
+	existingToken: { expiresAt?: string } | undefined,
 	isExpired: boolean,
 	notFound: boolean,
 ): string {

@@ -438,9 +438,7 @@ function MultiIngestionSettings(): JSX.Element {
 				data: {
 					name: values.name,
 					tags: updatedTags,
-					expires_at: new Date(
-						dayjs(values.expires_at).endOf('day').toISOString(),
-					),
+					expires_at: dayjs(values.expires_at).endOf('day').toISOString(),
 				},
 			},
 			{

@@ -471,13 +469,11 @@ function MultiIngestionSettings(): JSX.Element {
 		const requestPayload = {
 			name: values.name,
 			tags: updatedTags,
-			expires_at: new Date(dayjs(values.expires_at).endOf('day').toISOString()),
+			expires_at: dayjs(values.expires_at).endOf('day').toISOString(),
 		};

 		createIngestionKey(
-			{
-				data: requestPayload,
-			},
+			{ data: requestPayload },
 			{
 				onSuccess: (_data) => {
 					notifications.success({
@@ -79,12 +79,12 @@ describe('MultiIngestionSettings Page', () => {
 			keys: [
 				{
 					name: 'Key One',
-					expires_at: new Date(TEST_EXPIRES_AT),
+					expires_at: TEST_EXPIRES_AT,
 					value: 'secret',
 					workspace_id: TEST_WORKSPACE_ID,
 					id: 'k1',
-					created_at: new Date(TEST_CREATED_UPDATED),
-					updated_at: new Date(TEST_CREATED_UPDATED),
+					created_at: TEST_CREATED_UPDATED,
+					updated_at: TEST_CREATED_UPDATED,
 					tags: [],
 					limits: [
 						{

@@ -160,12 +160,12 @@ describe('MultiIngestionSettings Page', () => {
 			keys: [
 				{
 					name: 'Key Logs',
-					expires_at: new Date(TEST_EXPIRES_AT),
+					expires_at: TEST_EXPIRES_AT,
 					value: 'secret',
 					workspace_id: TEST_WORKSPACE_ID,
 					id: 'k2',
-					created_at: new Date(TEST_CREATED_UPDATED),
-					updated_at: new Date(TEST_CREATED_UPDATED),
+					created_at: TEST_CREATED_UPDATED,
+					updated_at: TEST_CREATED_UPDATED,
 					tags: [],
 					limits: [
 						{

@@ -238,12 +238,12 @@ describe('MultiIngestionSettings Page', () => {
 			keys: [
 				{
 					name: KEY_NAME,
-					expires_at: new Date(TEST_EXPIRES_AT),
+					expires_at: TEST_EXPIRES_AT,
 					value: 'secret',
 					workspace_id: TEST_WORKSPACE_ID,
 					id: 'k1',
-					created_at: new Date(TEST_CREATED_UPDATED),
-					updated_at: new Date(TEST_CREATED_UPDATED),
+					created_at: TEST_CREATED_UPDATED,
+					updated_at: TEST_CREATED_UPDATED,
 					tags: [],
 					limits: [
 						{

@@ -299,12 +299,12 @@ describe('MultiIngestionSettings Page', () => {
 			keys: [
 				{
 					name: 'Key Regular',
-					expires_at: new Date(TEST_EXPIRES_AT),
+					expires_at: TEST_EXPIRES_AT,
 					value: 'secret1',
 					workspace_id: TEST_WORKSPACE_ID,
 					id: 'k1',
-					created_at: new Date(TEST_CREATED_UPDATED),
-					updated_at: new Date(TEST_CREATED_UPDATED),
+					created_at: TEST_CREATED_UPDATED,
+					updated_at: TEST_CREATED_UPDATED,
 					tags: [],
 					limits: [],
 				},

@@ -319,12 +319,12 @@ describe('MultiIngestionSettings Page', () => {
 			keys: [
 				{
 					name: 'Key Search Result',
-					expires_at: new Date(TEST_EXPIRES_AT),
+					expires_at: TEST_EXPIRES_AT,
 					value: 'secret2',
 					workspace_id: TEST_WORKSPACE_ID,
 					id: 'k2',
-					created_at: new Date(TEST_CREATED_UPDATED),
-					updated_at: new Date(TEST_CREATED_UPDATED),
+					created_at: TEST_CREATED_UPDATED,
+					updated_at: TEST_CREATED_UPDATED,
 					tags: [],
 					limits: [],
 				},
@@ -13,9 +13,9 @@ describe('filterAlerts', () => {
 	const mockAlertBase: Partial<RuletypesRuleDTO> = {
 		state: 'active' as RuletypesAlertStateDTO,
 		disabled: false,
-		createdAt: new Date('2024-01-01T00:00:00Z'),
+		createdAt: '2024-01-01T00:00:00Z',
 		createdBy: 'test-user',
-		updatedAt: new Date('2024-01-01T00:00:00Z'),
+		updatedAt: '2024-01-01T00:00:00Z',
 		updatedBy: 'test-user',
 		version: '1',
 		condition: {
@@ -20,7 +20,7 @@ const mockUsers: TypesUserDTO[] = [
 		displayName: 'Alice Smith',
 		email: 'alice@signoz.io',
 		status: 'active',
-		createdAt: new Date('2024-01-01T00:00:00.000Z'),
+		createdAt: '2024-01-01T00:00:00.000Z',
 		orgId: 'org-1',
 	},
 	{

@@ -28,7 +28,7 @@ const mockUsers: TypesUserDTO[] = [
 		displayName: 'Bob Jones',
 		email: 'bob@signoz.io',
 		status: 'active',
-		createdAt: new Date('2024-01-02T00:00:00.000Z'),
+		createdAt: '2024-01-02T00:00:00.000Z',
 		orgId: 'org-1',
 	},
 	{

@@ -36,7 +36,7 @@ const mockUsers: TypesUserDTO[] = [
 		displayName: '',
 		email: 'charlie@signoz.io',
 		status: 'pending_invite',
-		createdAt: new Date('2024-01-03T00:00:00.000Z'),
+		createdAt: '2024-01-03T00:00:00.000Z',
 		orgId: 'org-1',
 	},
 	{

@@ -44,7 +44,7 @@ const mockUsers: TypesUserDTO[] = [
 		displayName: 'Dave Deleted',
 		email: 'dave@signoz.io',
 		status: 'deleted',
-		createdAt: new Date('2024-01-04T00:00:00.000Z'),
+		createdAt: '2024-01-04T00:00:00.000Z',
 		orgId: 'org-1',
 	},
 ];
@@ -24,7 +24,7 @@ import { PlannedDowntimeDeleteModal } from './PlannedDowntimeDeleteModal';
 import { PlannedDowntimeForm } from './PlannedDowntimeForm';
 import { PlannedDowntimeList } from './PlannedDowntimeList';
 import {
-	defautlInitialValues,
+	defaultInitialValues,
 	deleteDowntimeHandler,
 } from './PlannedDowntimeutils';

@@ -48,9 +48,7 @@ export function PlannedDowntime(): JSX.Element {
 	const urlQuery = useUrlQuery();

-	const [initialValues, setInitialValues] =
-		useState<Partial<RuletypesPlannedMaintenanceDTO & { editMode: boolean }>>(
-			defautlInitialValues,
-		);
+	const [initialValues, setInitialValues] =
+		useState<Partial<RuletypesPlannedMaintenanceDTO>>(defaultInitialValues);

 	const downtimeSchedules = useListDowntimeSchedules();
 	const alertOptions = React.useMemo(

@@ -148,7 +146,7 @@ export function PlannedDowntime(): JSX.Element {
 					<Button
 						type="primary"
 						onClick={(): void => {
-							setInitialValues({ ...defautlInitialValues, editMode: false });
+							setInitialValues(defaultInitialValues);
 							setIsOpen(true);
 							setEditMode(false);
 							form.resetFields();
|
||||
import {
|
||||
getAlertOptionsFromIds,
|
||||
getDurationInfo,
|
||||
getEndTime,
|
||||
handleTimeConversion,
|
||||
isScheduleRecurring,
|
||||
recurrenceOptions,
|
||||
recurrenceOptionWithSubmenu,
|
||||
@@ -64,11 +62,19 @@ const TIME_FORMAT = DATE_TIME_FORMATS.TIME;
|
||||
const DATE_FORMAT = DATE_TIME_FORMATS.ORDINAL_DATE;
|
||||
const ORDINAL_FORMAT = DATE_TIME_FORMATS.ORDINAL_ONLY;
|
||||
|
||||
const TZ_OPTIONS: DefaultOptionType[] = ALL_TIME_ZONES.map(
|
||||
(timezone: string) => ({
|
||||
label: timezone,
|
||||
value: timezone,
|
||||
key: timezone,
|
||||
}),
|
||||
);
|
||||
|
||||
interface PlannedDowntimeFormData {
|
||||
name: string;
|
||||
startTime: dayjs.Dayjs | string;
|
||||
endTime: dayjs.Dayjs | string;
|
||||
recurrence?: RuletypesRecurrenceDTO | null;
|
||||
startTime: dayjs.Dayjs | null;
|
||||
endTime: dayjs.Dayjs | null;
|
||||
recurrence?: RuletypesRecurrenceDTO;
|
||||
alertRules: DefaultOptionType[];
|
||||
recurrenceSelect?: RuletypesRecurrenceDTO;
|
||||
timezone?: string;
|
||||
@@ -77,11 +83,7 @@ interface PlannedDowntimeFormData {
|
||||
const customFormat = DATE_TIME_FORMATS.ORDINAL_DATETIME;
|
||||
|
||||
interface PlannedDowntimeFormProps {
|
||||
initialValues: Partial<
|
||||
RuletypesPlannedMaintenanceDTO & {
|
||||
editMode: boolean;
|
||||
}
|
||||
>;
|
||||
initialValues: Partial<RuletypesPlannedMaintenanceDTO>;
|
||||
alertOptions: DefaultOptionType[];
|
||||
isError: boolean;
|
||||
isLoading: boolean;
|
||||
@@ -89,7 +91,7 @@ interface PlannedDowntimeFormProps {
|
||||
setIsOpen: React.Dispatch<React.SetStateAction<boolean>>;
|
||||
refetchAllSchedules: () => void;
|
||||
isEditMode: boolean;
|
||||
form: FormInstance<any>;
|
||||
form: FormInstance;
|
||||
}
|
||||
|
||||
export function PlannedDowntimeForm(
|
||||
@@ -107,66 +109,46 @@ export function PlannedDowntimeForm(
|
||||
form,
|
||||
} = props;
|
||||
|
||||
const [selectedTags, setSelectedTags] = React.useState<
|
||||
DefaultOptionType | DefaultOptionType[]
|
||||
>([]);
|
||||
const [selectedTags, setSelectedTags] = React.useState<DefaultOptionType[]>(
|
||||
[],
|
||||
);
|
||||
const alertRuleFormName = 'alertRules';
|
||||
const [saveLoading, setSaveLoading] = useState(false);
|
||||
const [durationUnit, setDurationUnit] = useState<string>(
|
||||
getDurationInfo(initialValues.schedule?.recurrence?.duration as string)
|
||||
?.unit || 'm',
|
||||
getDurationInfo(initialValues.schedule?.recurrence?.duration)?.unit || 'm',
|
||||
);
|
||||
|
||||
const [formData, setFormData] = useState<Partial<PlannedDowntimeFormData>>({
|
||||
timezone: initialValues.schedule?.timezone,
|
||||
});
|
||||
|
||||
const [recurrenceType, setRecurrenceType] = useState<string | null>(
|
||||
(initialValues.schedule?.recurrence?.repeatType as string) ||
|
||||
const [recurrenceType, setRecurrenceType] = useState<string>(
|
||||
initialValues.schedule?.recurrence?.repeatType ||
|
||||
recurrenceOptions.doesNotRepeat.value,
|
||||
);
|
||||
|
||||
const timezoneInitialValue = !isEmpty(initialValues.schedule?.timezone)
|
||||
? (initialValues.schedule?.timezone as string)
|
||||
: undefined;
|
||||
|
||||
const { notifications } = useNotifications();
|
||||
const { showErrorModal } = useErrorModal();
|
||||
|
||||
const requiredFieldRule = [{ required: true }];
|
||||
|
||||
const datePickerFooter = (mode: any): any =>
|
||||
mode === 'time' ? (
|
||||
<span style={{ color: 'gray' }}>Please select the time</span>
|
||||
) : null;
|
||||
|
||||
const saveHanlder = useCallback(
|
||||
const saveHandler = useCallback(
|
||||
async (values: PlannedDowntimeFormData) => {
|
||||
const shouldKeepLocalTime = !isEditMode;
|
||||
const data: RuletypesPostablePlannedMaintenanceDTO = {
|
||||
alertIds: values.alertRules
|
||||
.map((alert) => alert.value)
|
||||
.filter((alert) => alert !== undefined) as string[],
|
||||
name: values.name,
|
||||
schedule: {
|
||||
startTime: new Date(
|
||||
handleTimeConversion(
|
||||
values.startTime,
|
||||
timezoneInitialValue,
|
||||
values.timezone,
|
||||
shouldKeepLocalTime,
|
||||
),
|
||||
),
|
||||
timezone: values.timezone as string,
|
||||
endTime: values.endTime
|
||||
? new Date(
|
||||
handleTimeConversion(
|
||||
values.endTime,
|
||||
timezoneInitialValue,
|
||||
values.timezone,
|
||||
shouldKeepLocalTime,
|
||||
),
|
||||
)
|
||||
: undefined,
|
||||
recurrence: values.recurrence as RuletypesRecurrenceDTO,
|
||||
startTime: values.startTime?.format(),
|
||||
endTime: values.endTime?.format(),
|
||||
timezone: values.timezone!,
|
||||
recurrence: values.recurrence,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -198,50 +180,58 @@ export function PlannedDowntimeForm(
|
||||
notifications,
|
||||
refetchAllSchedules,
|
||||
setIsOpen,
|
||||
timezoneInitialValue,
|
||||
showErrorModal,
|
||||
],
|
||||
);
|
||||
const onFinish = async (values: PlannedDowntimeFormData): Promise<void> => {
|
||||
const { recurrence } = values;
|
||||
const recurrenceData =
|
||||
values?.recurrence?.repeatType === recurrenceOptions.doesNotRepeat.value
|
||||
!recurrence ||
|
||||
recurrence.repeatType === recurrenceOptions.doesNotRepeat.value
|
||||
? undefined
|
||||
: {
|
||||
duration: values.recurrence?.duration
|
||||
? `${values.recurrence?.duration}${durationUnit}`
|
||||
: undefined,
|
||||
endTime: !isEmpty(values.endTime)
|
||||
? handleTimeConversion(
|
||||
values.endTime,
|
||||
timezoneInitialValue,
|
||||
values.timezone,
|
||||
!isEditMode,
|
||||
)
|
||||
: undefined,
|
||||
startTime: handleTimeConversion(
|
||||
values.startTime,
|
||||
timezoneInitialValue,
|
||||
values.timezone,
|
||||
!isEditMode,
|
||||
),
|
||||
repeatOn: !values.recurrence?.repeatOn?.length
|
||||
? undefined
|
||||
: values.recurrence?.repeatOn,
|
||||
repeatType: values.recurrence?.repeatType,
|
||||
duration: recurrence.duration
|
||||
? `${recurrence.duration}${durationUnit}`
|
||||
: '',
|
||||
startTime: values.startTime!.format(),
|
||||
endTime: values.endTime?.format(),
|
||||
repeatOn: recurrence.repeatOn,
|
||||
repeatType: recurrence.repeatType,
|
||||
};
|
||||
|
||||
const payloadValues = {
|
||||
await saveHandler({
|
||||
...values,
|
||||
recurrence: recurrenceData as RuletypesRecurrenceDTO | undefined,
|
||||
};
|
||||
await saveHanlder(payloadValues);
|
||||
recurrence: recurrenceData,
|
||||
});
|
||||
};
|
||||
|
||||
const formValidationRules = [
|
||||
{
|
||||
required: true,
|
||||
},
|
||||
];
|
||||
const handleFormData = (data: Partial<PlannedDowntimeFormData>): void => {
|
||||
const { startTime, endTime, timezone } = data;
|
||||
const update: Partial<PlannedDowntimeFormData> = {};
|
||||
|
||||
// If the set timezone doesn't match, update it.
|
||||
if (
|
||||
startTime &&
|
||||
timezone &&
|
||||
startTime.format() !== startTime.tz(timezone, true).format()
|
||||
) {
|
||||
update.startTime = startTime.tz(timezone, true);
|
||||
}
|
||||
if (
|
||||
endTime &&
|
||||
timezone &&
|
||||
endTime.format() !== endTime.tz(timezone, true).format()
|
||||
) {
|
||||
update.endTime = endTime.tz(timezone, true);
|
||||
}
|
||||
|
||||
if (!isEmpty(update)) {
|
||||
data = { ...data, ...update };
|
||||
form.setFieldsValue({ ...update });
|
||||
}
|
||||
|
||||
setFormData(data);
|
||||
};
|
||||
|
||||
const handleOk = async (): Promise<void> => {
|
||||
await form.validateFields().catch(() => {
|
||||
@@ -249,16 +239,11 @@ export function PlannedDowntimeForm(
|
||||
});
|
||||
};
|
||||
|
||||
const handleCancel = (): void => {
|
||||
setIsOpen(false);
|
||||
};
|
||||
const handleCancel = (): void => setIsOpen(false);
|
||||
|
||||
const handleChange = (
|
||||
_value: string,
|
||||
options: DefaultOptionType | DefaultOptionType[],
|
||||
): void => {
|
||||
const handleAlertRulesChange: SelectProps['onChange'] = (_value, options) => {
|
||||
form.setFieldValue(alertRuleFormName, options);
|
||||
setSelectedTags(options);
|
||||
setSelectedTags(Array.isArray(options) ? options : [options]);
|
||||
};
|
||||
|
||||
const noTagRenderer: SelectProps['tagRender'] = () => <></>;
|
||||
@@ -267,113 +252,51 @@ export function PlannedDowntimeForm(
|
||||
if (!removedTag) {
|
||||
return;
|
||||
}
|
||||
const newTags = selectedTags.filter(
|
||||
(tag: DefaultOptionType) => tag.value !== removedTag,
|
||||
);
|
||||
const newTags = selectedTags.filter((tag) => tag.value !== removedTag);
|
||||
form.setFieldValue(alertRuleFormName, newTags);
|
||||
setSelectedTags(newTags);
|
||||
};
|
||||
|
||||
const formatedInitialValues = useMemo(() => {
|
||||
const formData: PlannedDowntimeFormData = {
|
||||
const formattedInitialValues = useMemo((): PlannedDowntimeFormData => {
|
||||
const { schedule } = initialValues;
|
||||
const startTime = schedule?.recurrence?.startTime || schedule?.startTime;
|
||||
const endTime = schedule?.recurrence?.endTime || schedule?.endTime;
|
||||
|
||||
return {
|
||||
name: defaultTo(initialValues.name, ''),
|
||||
alertRules: getAlertOptionsFromIds(
|
||||
initialValues.alertIds || [],
|
||||
alertOptions,
|
||||
),
|
||||
endTime: getEndTime(initialValues) ? dayjs(getEndTime(initialValues)) : '',
|
||||
startTime: initialValues.schedule?.startTime
|
||||
? dayjs(initialValues.schedule?.startTime)
|
||||
: '',
|
||||
startTime: startTime ? dayjs(startTime).tz(schedule.timezone) : null,
|
||||
endTime: endTime ? dayjs(endTime).tz(schedule.timezone) : null,
|
||||
recurrence: {
|
||||
...initialValues.schedule?.recurrence,
|
||||
repeatType: (!isScheduleRecurring(initialValues?.schedule)
|
||||
...schedule?.recurrence,
|
||||
repeatType: !isScheduleRecurring(schedule)
|
||||
? recurrenceOptions.doesNotRepeat.value
|
||||
: initialValues.schedule?.recurrence
|
||||
?.repeatType) as RuletypesRecurrenceDTO['repeatType'],
|
||||
duration: String(
|
||||
getDurationInfo(initialValues.schedule?.recurrence?.duration as string)
|
||||
?.value ?? '',
|
||||
),
|
||||
: schedule?.recurrence?.repeatType,
|
||||
duration: getDurationInfo(schedule?.recurrence?.duration)?.value ?? '',
|
||||
} as RuletypesRecurrenceDTO,
|
||||
timezone: initialValues.schedule?.timezone as string,
|
||||
timezone: schedule?.timezone as string,
|
||||
};
|
||||
return formData;
|
||||
}, [initialValues, alertOptions]);
|
||||
|
||||
useEffect(() => {
|
||||
setSelectedTags(formatedInitialValues.alertRules);
|
||||
form.setFieldsValue({ ...formatedInitialValues });
|
||||
}, [form, formatedInitialValues, initialValues]);
|
||||
|
||||
const timeZoneItems: DefaultOptionType[] = ALL_TIME_ZONES.map(
|
||||
(timezone: string) => ({
|
||||
label: timezone,
|
||||
value: timezone,
|
||||
key: timezone,
|
||||
}),
|
||||
);
|
||||
|
||||
const getTimezoneFormattedTime = (
|
||||
time: string | dayjs.Dayjs,
|
||||
timeZone?: string,
|
||||
isEditMode?: boolean,
|
||||
format?: string,
|
||||
): string => {
|
||||
if (!time) {
|
||||
return '';
|
||||
}
|
||||
if (!timeZone) {
|
||||
return dayjs(time).format(format);
|
||||
}
|
||||
return dayjs(time).tz(timeZone, isEditMode).format(format);
|
||||
};
|
||||
setSelectedTags(formattedInitialValues.alertRules);
|
||||
form.setFieldsValue({ ...formattedInitialValues });
|
||||
}, [form, formattedInitialValues, initialValues]);
|
||||
|
||||
const startTimeText = useMemo((): string => {
|
||||
let startTime = formData?.startTime;
|
||||
if (recurrenceType !== recurrenceOptions.doesNotRepeat.value) {
|
||||
startTime =
|
||||
(formData?.recurrence?.startTime
|
||||
? dayjs(formData.recurrence.startTime).toISOString()
|
||||
: '') ||
|
||||
formData?.startTime ||
|
||||
'';
|
||||
}
|
||||
|
||||
const startTime = formData.startTime;
|
||||
if (!startTime) {
|
||||
return '';
|
||||
}
|
||||
|
||||
if (formData.timezone) {
|
||||
startTime = handleTimeConversion(
|
||||
startTime,
|
||||
timezoneInitialValue,
|
||||
formData?.timezone,
|
||||
!isEditMode,
|
||||
);
|
||||
}
|
||||
const daysOfWeek = formData?.recurrence?.repeatOn;
|
||||
const daysOfWeek = formData.recurrence?.repeatOn;
|
||||
|
||||
const formattedStartTime = getTimezoneFormattedTime(
|
||||
startTime,
|
||||
formData.timezone,
|
||||
!isEditMode,
|
||||
TIME_FORMAT,
|
||||
);
|
||||
|
||||
const formattedStartDate = getTimezoneFormattedTime(
|
||||
startTime,
|
||||
formData.timezone,
|
||||
!isEditMode,
|
||||
DATE_FORMAT,
|
||||
);
|
||||
|
||||
const ordinalFormat = getTimezoneFormattedTime(
|
||||
startTime,
|
||||
formData.timezone,
|
||||
!isEditMode,
|
||||
ORDINAL_FORMAT,
|
||||
);
|
||||
const formattedStartTime = startTime.format(TIME_FORMAT);
|
||||
const formattedStartDate = startTime.format(DATE_FORMAT);
|
||||
const ordinalFormat = startTime.format(ORDINAL_FORMAT);
|
||||
|
||||
const formattedDaysOfWeek = daysOfWeek?.join(', ');
|
||||
switch (recurrenceType) {
|
||||
@@ -388,49 +311,18 @@ export function PlannedDowntimeForm(
|
||||
default:
|
||||
return `Scheduled for ${formattedStartDate} starting at ${formattedStartTime}.`;
|
||||
}
|
||||
}, [formData, recurrenceType, isEditMode, timezoneInitialValue]);
|
||||
}, [formData, recurrenceType, timezone]);
|
||||
|
||||
const endTimeText = useMemo((): string => {
|
||||
let endTime = formData?.endTime;
|
||||
if (recurrenceType !== recurrenceOptions.doesNotRepeat.value) {
|
||||
endTime =
|
||||
(formData?.recurrence?.endTime
|
||||
? dayjs(formData.recurrence.endTime).toISOString()
|
||||
: '') || '';
|
||||
|
||||
if (!isEditMode && !endTime) {
|
||||
endTime = formData?.endTime || '';
|
||||
}
|
||||
}
|
||||
|
||||
const endTime = formData.endTime;
|
||||
if (!endTime) {
|
||||
return '';
|
||||
}
|
||||
|
||||
if (formData.timezone) {
|
||||
endTime = handleTimeConversion(
|
||||
endTime,
|
||||
timezoneInitialValue,
|
||||
formData?.timezone,
|
||||
!isEditMode,
|
||||
);
|
||||
}
|
||||
|
||||
const formattedEndTime = getTimezoneFormattedTime(
|
||||
endTime,
|
||||
formData.timezone,
|
||||
!isEditMode,
|
||||
TIME_FORMAT,
|
||||
);
|
||||
|
||||
const formattedEndDate = getTimezoneFormattedTime(
|
||||
endTime,
|
||||
formData.timezone,
|
||||
!isEditMode,
|
||||
DATE_FORMAT,
|
||||
);
|
||||
const formattedEndTime = endTime.format(TIME_FORMAT);
|
||||
const formattedEndDate = endTime.format(DATE_FORMAT);
|
||||
return `Scheduled to end maintenance on ${formattedEndDate} at ${formattedEndTime}.`;
|
||||
}, [formData, recurrenceType, isEditMode, timezoneInitialValue]);
|
||||
}, [formData, recurrenceType, timezone]);
|
||||
|
||||
return (
|
||||
<Modal
|
||||
@@ -446,33 +338,28 @@ export function PlannedDowntimeForm(
|
||||
footer={null}
|
||||
>
|
||||
<Form<PlannedDowntimeFormData>
|
||||
name={initialValues.editMode ? 'edit-form' : 'create-form'}
|
||||
name={isEditMode ? 'edit-form' : 'create-form'}
|
||||
form={form}
|
||||
layout="vertical"
|
||||
className="createForm"
|
||||
onFinish={onFinish}
|
||||
onValuesChange={(): void => {
|
||||
setRecurrenceType(form.getFieldValue('recurrence')?.repeatType as string);
|
||||
setFormData(form.getFieldsValue());
|
||||
handleFormData(form.getFieldsValue());
|
||||
}}
|
||||
autoComplete="off"
|
||||
>
|
||||
<Form.Item label="Name" name="name" rules={formValidationRules}>
|
||||
<Form.Item label="Name" name="name" rules={requiredFieldRule}>
|
||||
<Input placeholder="e.g. Upgrade downtime" />
|
||||
</Form.Item>
|
||||
<Form.Item
|
||||
label="Starts from"
|
||||
name="startTime"
|
||||
rules={formValidationRules}
|
||||
rules={requiredFieldRule}
|
||||
className={!isEmpty(startTimeText) ? 'formItemWithBullet' : ''}
|
||||
getValueProps={(value): any => ({
|
||||
value: value ? dayjs(value).tz(timezoneInitialValue) : undefined,
|
||||
})}
|
||||
>
|
||||
<DatePicker
|
||||
format={(date): string =>
|
||||
dayjs(date).tz(timezoneInitialValue).format(customFormat)
|
||||
}
|
||||
format={(date) => date.format(customFormat)}
|
||||
showTime
|
||||
renderExtraFooter={datePickerFooter}
|
||||
showNow={false}
|
||||
@@ -485,7 +372,7 @@ export function PlannedDowntimeForm(
|
||||
<Form.Item
|
||||
label="Repeats every"
|
||||
name={['recurrence', 'repeatType']}
|
||||
rules={formValidationRules}
|
||||
rules={requiredFieldRule}
|
||||
>
|
||||
<Select
|
||||
placeholder="Select option..."
|
||||
@@ -496,7 +383,7 @@ export function PlannedDowntimeForm(
|
||||
<Form.Item
|
||||
label="Weekly occurernce"
|
||||
name={['recurrence', 'repeatOn']}
|
||||
rules={formValidationRules}
|
||||
rules={requiredFieldRule}
|
||||
>
|
||||
<Select
|
||||
placeholder="Select option..."
|
||||
@@ -510,16 +397,14 @@ export function PlannedDowntimeForm(
|
||||
<Form.Item
|
||||
label="Duration"
|
||||
name={['recurrence', 'duration']}
|
||||
rules={formValidationRules}
|
||||
rules={requiredFieldRule}
|
||||
>
|
||||
<Input
|
||||
addonAfter={
|
||||
<Select
|
||||
defaultValue="m"
|
||||
value={durationUnit}
|
||||
onChange={(value): void => {
|
||||
setDurationUnit(value);
|
||||
}}
|
||||
onChange={(value): void => setDurationUnit(value)}
|
||||
>
|
||||
<Select.Option value="m">Mins</Select.Option>
|
||||
<Select.Option value="h">Hours</Select.Option>
|
||||
@@ -533,8 +418,8 @@ export function PlannedDowntimeForm(
|
||||
/>
|
||||
</Form.Item>
|
||||
)}
|
||||
<Form.Item label="Timezone" name="timezone" rules={formValidationRules}>
|
||||
<Select options={timeZoneItems} placeholder="Select timezone" showSearch />
|
||||
<Form.Item label="Timezone" name="timezone" rules={requiredFieldRule}>
|
||||
<Select options={TZ_OPTIONS} placeholder="Select timezone" showSearch />
|
||||
</Form.Item>
|
||||
<Form.Item
|
||||
label="Ends on"
|
||||
@@ -546,14 +431,9 @@ export function PlannedDowntimeForm(
|
||||
},
|
||||
]}
|
||||
className={!isEmpty(endTimeText) ? 'formItemWithBullet' : ''}
|
||||
getValueProps={(value): any => ({
|
||||
value: value ? dayjs(value).tz(timezoneInitialValue) : undefined,
|
||||
})}
|
||||
>
|
||||
<DatePicker
|
||||
format={(date): string =>
|
||||
dayjs(date).tz(timezoneInitialValue).format(customFormat)
|
||||
}
|
||||
format={(date) => date.format(customFormat)}
|
||||
showTime
|
||||
showNow={false}
|
||||
renderExtraFooter={datePickerFooter}
|
||||
@@ -584,7 +464,7 @@ export function PlannedDowntimeForm(
|
||||
status={isError ? 'error' : undefined}
|
||||
loading={isLoading}
|
||||
tagRender={noTagRenderer}
|
||||
onChange={handleChange}
|
||||
onChange={handleAlertRulesChange}
|
||||
showSearch
|
||||
options={alertOptions}
|
||||
filterOption={(input, option): boolean =>
|
||||
|
||||
@@ -1,4 +1,4 @@
import { ReactNode, useEffect } from 'react';
import React, { ReactNode, useEffect } from 'react';
import { UseQueryResult } from 'react-query';
import { Color } from '@signozhq/design-tokens';
import { Collapse, Flex, Space, Table, TableProps, Tag, Tooltip } from 'antd';
@@ -8,7 +8,7 @@ import type {
  ListDowntimeSchedules200,
  RenderErrorResponseDTO,
  RuletypesPlannedMaintenanceDTO,
  RuletypesRecurrenceDTO,
  RuletypesScheduleDTO,
} from 'api/generated/services/sigNoz.schemas';
import type { ErrorType } from 'api/generatedAPIInstance';
import cx from 'classnames';
@@ -19,12 +19,11 @@ import { CalendarClock, PenLine, Trash2 } from '@signozhq/icons';
import { useAppContext } from 'providers/App/App';
import { USER_ROLES } from 'types/roles';

import { showErrorNotification } from '../../utils/error';
import { showErrorNotification } from 'utils/error';
import {
  formatDateTime,
  getAlertOptionsFromIds,
  getDuration,
  getEndTime,
  recurrenceInfo,
} from './PlannedDowntimeutils';

@@ -126,29 +125,28 @@ export function CollapseListContent({
  created_at,
  created_by_name,
  created_by_email,
  timeframe,
  repeats,
  schedule,
  updated_at,
  updated_by_name,
  alertOptions,
  timezone,
}: {
  created_at?: string;
  created_by_name?: string;
  created_by_email?: string;
  timeframe: [string | undefined | null, string | undefined | null];
  repeats?: RuletypesRecurrenceDTO | null;
  schedule?: RuletypesScheduleDTO;
  updated_at?: string;
  updated_by_name?: string;
  alertOptions?: DefaultOptionType[];
  timezone?: string;
}): JSX.Element {
  const repeats = schedule?.recurrence;
  const renderItems = (title: string, value: ReactNode): JSX.Element => (
    <div className="render-item-collapse-list">
      <Typography>{title}</Typography>
      <div className="render-item-value">{value}</div>
    </div>
  );
  const startTime = formatDateTime(schedule?.startTime, schedule?.timezone);
  const endTime = formatDateTime(schedule?.endTime, schedule?.timezone);

  return (
    <Flex vertical>
@@ -183,16 +181,20 @@ export function CollapseListContent({

      {renderItems(
        'Timeframe',
        timeframe[0] || timeframe[1] ? (
          <Typography>{`${formatDateTime(timeframe[0])} ⎯ ${formatDateTime(
            timeframe[1],
          )}`}</Typography>
        schedule?.startTime ? (
          <Typography>{`${startTime} ⎯ ${endTime}`}</Typography>
        ) : (
          '-'
        ),
      )}
      {renderItems('Timezone', <Typography>{timezone || '-'}</Typography>)}
      {renderItems('Repeats', <Typography>{recurrenceInfo(repeats)}</Typography>)}
      {renderItems(
        'Timezone',
        <Typography>{schedule?.timezone || '-'}</Typography>,
      )}
      {renderItems(
        'Repeats',
        <Typography>{recurrenceInfo(repeats, schedule?.timezone)}</Typography>,
      )}
      {renderItems(
        'Alerts silenced',
        alertOptions?.length ? (
@@ -232,22 +234,12 @@ export function CustomCollapseList(
    setModalOpen,
    handleDeleteDowntime,
    setEditMode,
    kind,
  } = props;

  const scheduleTime = schedule?.startTime
    ? dayjs(schedule.startTime).toISOString()
    : createdAt
    ? dayjs(createdAt).toISOString()
    : '';
  // Combine time and date
  const formattedDateAndTime = `Start time ⎯ ${formatDateTime(
    defaultTo(scheduleTime, ''),
  )} ${schedule?.timezone}`;
  const endTime = getEndTime({
    kind,
    schedule,
  } as Partial<RuletypesPlannedMaintenanceDTO>);
    ? dayjs(schedule.startTime).tz(schedule.timezone)
    : createdAt || '';
  const formattedDateAndTime = `Start time ⎯ ${formatDateTime(scheduleTime)} ${schedule?.timezone}`;

  return (
    <>
@@ -257,21 +249,16 @@ export function CustomCollapseList(
      <HeaderComponent
        duration={
          schedule?.recurrence?.duration
            ? (schedule?.recurrence?.duration as string)
            : getDuration(
                schedule?.startTime ? dayjs(schedule.startTime).toISOString() : '',
                schedule?.endTime ? dayjs(schedule.endTime).toISOString() : '',
              )
            ? schedule.recurrence.duration
            : getDuration(schedule?.startTime || '', schedule?.endTime || '')
        }
        name={defaultTo(name, '')}
        handleEdit={(): void => {
        handleEdit={() => {
          setInitialValues({ ...props });
          setModalOpen(true);
          setEditMode(true);
        }}
        handleDelete={(): void => {
          handleDeleteDowntime(id ?? '', name || '');
        }}
        handleDelete={() => handleDeleteDowntime(id ?? '', name || '')}
      />
    }
    key={id ?? ''}
@@ -279,17 +266,10 @@ export function CustomCollapseList(
    <CollapseListContent
      created_at={createdAt ? dayjs(createdAt).toISOString() : ''}
      created_by_name={defaultTo(createdBy, '')}
      timeframe={[
        schedule?.startTime?.toString(),
        typeof endTime === 'string' ? endTime : endTime?.toString(),
      ]}
      repeats={
        schedule?.recurrence as RuletypesRecurrenceDTO | null | undefined
      }
      schedule={schedule}
      updated_at={updatedAt ? dayjs(updatedAt).toISOString() : ''}
      updated_by_name={defaultTo(updatedBy, '')}
      alertOptions={alertOptions}
      timezone={defaultTo(schedule?.timezone, '')}
    />
  </Panel>
</Collapse>
@@ -11,8 +11,8 @@ import type {
import type { ErrorType } from 'api/generatedAPIInstance';
import { AxiosError } from 'axios';
import { DATE_TIME_FORMATS } from 'constants/dateTimeFormats';
import dayjs from 'dayjs';
import { isEmpty, isEqual } from 'lodash-es';
import dayjs, { Dayjs } from 'dayjs';
import { isEmpty } from 'lodash-es';
import APIError from 'types/api/error';

type DateTimeString = string | null | undefined;
@@ -38,14 +38,20 @@ export const getDuration = (
  return `${hours} hours`;
};

export const formatDateTime = (dateTimeString?: string | null): string => {
export const formatDateTime = (
  dateTimeString?: string | Dayjs | null,
  timezone?: string,
): string => {
  if (!dateTimeString) {
    return 'N/A';
  }

  return dayjs(dateTimeString.slice(0, 19)).format(
    DATE_TIME_FORMATS.MONTH_DATETIME,
  );
  let dt = dayjs(dateTimeString);
  if (timezone) {
    dt = dt.tz(timezone);
  }

  return dt.format(DATE_TIME_FORMATS.MONTH_DATETIME);
};

export const getAlertOptionsFromIds = (
@@ -61,6 +67,7 @@ export const getAlertOptionsFromIds = (

export const recurrenceInfo = (
  recurrence?: RuletypesRecurrenceDTO | null,
  timezone?: string,
): string => {
  if (!recurrence) {
    return 'No';
@@ -69,10 +76,10 @@ export const recurrenceInfo = (
  const { startTime, duration, repeatOn, repeatType, endTime } = recurrence;

  const formattedStartTime = startTime
    ? formatDateTime(dayjs(startTime).toISOString())
    ? formatDateTime(startTime, timezone)
    : '';
  const formattedEndTime = endTime
    ? `to ${formatDateTime(dayjs(endTime).toISOString())}`
    ? `to ${formatDateTime(endTime, timezone)}`
    : '';
  const weeklyRepeatString = repeatOn ? `on ${repeatOn.join(', ')}` : '';
  const durationString = duration ? `- Duration: ${duration}` : '';
@@ -80,9 +87,7 @@ export const recurrenceInfo = (
  return `Repeats - ${repeatType} ${weeklyRepeatString} from ${formattedStartTime} ${formattedEndTime} ${durationString}`;
};

export const defautlInitialValues: Partial<
  RuletypesPlannedMaintenanceDTO & { editMode: boolean }
> = {
export const defaultInitialValues: Partial<RuletypesPlannedMaintenanceDTO> = {
  name: '',
  description: '',
  schedule: {
@@ -94,7 +99,6 @@ export const defautlInitialValues: Partial<
  alertIds: [],
  createdAt: undefined,
  createdBy: undefined,
  editMode: false,
};

type DeleteDowntimeScheduleProps = {
@@ -210,75 +214,6 @@ export const recurrenceOptionWithSubmenu: Option[] = [
  recurrenceOptions.monthly,
];

export const getRecurrenceOptionFromValue = (
  value?: string | Option | null,
): Option | null | undefined => {
  if (!value) {
    return null;
  }
  if (typeof value === 'string') {
    return Object.values(recurrenceOptions).find(
      (option) => option.value === value,
    );
  }
  return value;
};

export const getEndTime = ({
  kind,
  schedule,
}: Partial<
  RuletypesPlannedMaintenanceDTO & {
    editMode: boolean;
  }
>): string | dayjs.Dayjs => {
  if (kind === 'fixed') {
    return schedule?.endTime ? dayjs(schedule.endTime).toISOString() : '';
  }

  return schedule?.recurrence?.endTime
    ? dayjs(schedule.recurrence.endTime).toISOString()
    : '';
};

export const isScheduleRecurring = (
  schedule?: RuletypesPlannedMaintenanceDTO['schedule'] | null,
): boolean => (schedule ? !isEmpty(schedule?.recurrence) : false);

function convertUtcOffsetToTimezoneOffset(offsetMinutes: number): string {
  const sign = offsetMinutes >= 0 ? '+' : '-';
  const absOffset = Math.abs(offsetMinutes);
  const hours = String(Math.floor(absOffset / 60)).padStart(2, '0');
  const minutes = String(absOffset % 60).padStart(2, '0');
  return `${sign}${hours}:${minutes}`;
}

export function formatWithTimezone(
  dateValue?: string | dayjs.Dayjs,
  timezone?: string,
): string {
  const parsedDate =
    typeof dateValue === 'string' ? dateValue : dateValue?.format();

  // Get the target timezone offset
  const targetOffset = convertUtcOffsetToTimezoneOffset(
    dayjs(dateValue).tz(timezone).utcOffset(),
  );

  return `${parsedDate?.substring(0, 19)}${targetOffset}`;
}

export function handleTimeConversion(
  dateValue: string | dayjs.Dayjs,
  timezoneInit?: string,
  timezone?: string,
  shouldKeepLocalTime?: boolean,
): string {
  const timezoneChanged = !isEqual(timezoneInit, timezone);
  const initialTime = dayjs(dateValue).tz(timezoneInit);

  const formattedTime = formatWithTimezone(initialTime, timezone);
  return timezoneChanged
    ? formattedTime
    : dayjs(dateValue).tz(timezone, shouldKeepLocalTime).format();
}
@@ -27,10 +27,10 @@ const MOCK_DATE_3 = '2024-01-03';
const MOCK_DOWNTIME_1 = createMockDowntime({
  id: '1',
  name: MOCK_DOWNTIME_1_NAME,
  createdAt: new Date(MOCK_DATE_1),
  updatedAt: new Date(MOCK_DATE_1),
  createdAt: MOCK_DATE_1,
  updatedAt: MOCK_DATE_1,
  schedule: buildSchedule({
    startTime: new Date(MOCK_DATE_1),
    startTime: MOCK_DATE_1,
    timezone: 'UTC',
  }),
  alertIds: [],
@@ -39,10 +39,10 @@ const MOCK_DOWNTIME_1 = createMockDowntime({
const MOCK_DOWNTIME_2 = createMockDowntime({
  id: '2',
  name: MOCK_DOWNTIME_2_NAME,
  createdAt: new Date(MOCK_DATE_2),
  updatedAt: new Date(MOCK_DATE_2),
  createdAt: MOCK_DATE_2,
  updatedAt: MOCK_DATE_2,
  schedule: buildSchedule({
    startTime: new Date(MOCK_DATE_2),
    startTime: MOCK_DATE_2,
    timezone: 'UTC',
  }),
  alertIds: [],
@@ -51,10 +51,10 @@ const MOCK_DOWNTIME_2 = createMockDowntime({
const MOCK_DOWNTIME_3 = createMockDowntime({
  id: '3',
  name: MOCK_DOWNTIME_3_NAME,
  createdAt: new Date(MOCK_DATE_3),
  updatedAt: new Date(MOCK_DATE_3),
  createdAt: MOCK_DATE_3,
  updatedAt: MOCK_DATE_3,
  schedule: buildSchedule({
    startTime: new Date(MOCK_DATE_3),
    startTime: MOCK_DATE_3,
    timezone: 'UTC',
  }),
  alertIds: [],

@@ -24,7 +24,7 @@ export const createMockDowntime = (
  description: overrides.description ?? '',
  schedule: buildSchedule({
    timezone: 'UTC',
    startTime: new Date('2024-01-01'),
    startTime: '2024-01-01',
    ...overrides.schedule,
  }),
  alertIds: overrides.alertIds ?? [],
@@ -118,10 +118,14 @@ export function buildPatchPayload({
  for (const res of resources) {
    const initial = initialConfig[res.id];
    const current = newConfig[res.id];
    const resourceDef = authzRes.resources.find((r) => r.kind === res.id);
    if (!resourceDef) {
    const found = authzRes.resources.find((r) => r.kind === res.id);
    if (!found) {
      continue;
    }
    const resourceDef: CoretypesResourceRefDTO = {
      kind: found.kind,
      type: found.type,
    };

    const initialScope = initial?.scope ?? PermissionScope.ONLY_SELECTED;
    const currentScope = current?.scope ?? PermissionScope.ONLY_SELECTED;
@@ -6,14 +6,34 @@ export default {
    {
      kind: 'factor-api-key',
      type: 'metaresource',
      allowedVerbs: ['create', 'delete', 'list', 'read', 'update'],
    },
    {
      kind: 'role',
      type: 'role',
      allowedVerbs: [
        'assignee',
        'attach',
        'create',
        'delete',
        'detach',
        'list',
        'read',
        'update',
      ],
    },
    {
      kind: 'serviceaccount',
      type: 'serviceaccount',
      allowedVerbs: [
        'attach',
        'create',
        'delete',
        'detach',
        'list',
        'read',
        'update',
      ],
    },
  ],
  relations: {
@@ -5,8 +5,8 @@ const orgId = '019ba2bb-2fa1-7b24-8159-cfca08617ef9';
export const managedRoles: AuthtypesRoleDTO[] = [
  {
    id: '019c24aa-2248-756f-9833-984f1ab63819',
    createdAt: new Date('2026-02-03T18:00:55.624356Z'),
    updatedAt: new Date('2026-02-03T18:00:55.624356Z'),
    createdAt: '2026-02-03T18:00:55.624356Z',
    updatedAt: '2026-02-03T18:00:55.624356Z',
    name: 'signoz-admin',
    description:
      'Role assigned to users who have full administrative access to SigNoz resources.',
@@ -15,8 +15,8 @@ export const managedRoles: AuthtypesRoleDTO[] = [
  },
  {
    id: '019c24aa-2248-757c-9faf-7b1e899751e0',
    createdAt: new Date('2026-02-03T18:00:55.624359Z'),
    updatedAt: new Date('2026-02-03T18:00:55.624359Z'),
    createdAt: '2026-02-03T18:00:55.624359Z',
    updatedAt: '2026-02-03T18:00:55.624359Z',
    name: 'signoz-editor',
    description:
      'Role assigned to users who can create, edit, and manage SigNoz resources but do not have full administrative privileges.',
@@ -25,8 +25,8 @@ export const managedRoles: AuthtypesRoleDTO[] = [
  },
  {
    id: '019c24aa-2248-7585-a129-4188b3473c27',
    createdAt: new Date('2026-02-03T18:00:55.624362Z'),
    updatedAt: new Date('2026-02-03T18:00:55.624362Z'),
    createdAt: '2026-02-03T18:00:55.624362Z',
    updatedAt: '2026-02-03T18:00:55.624362Z',
    name: 'signoz-viewer',
    description:
      'Role assigned to users who have read-only access to SigNoz resources.',
@@ -38,8 +38,8 @@ export const managedRoles: AuthtypesRoleDTO[] = [
export const customRoles: AuthtypesRoleDTO[] = [
  {
    id: '019c24aa-3333-0001-aaaa-111111111111',
    createdAt: new Date('2026-02-10T10:30:00.000Z'),
    updatedAt: new Date('2026-02-12T14:20:00.000Z'),
    createdAt: '2026-02-10T10:30:00.000Z',
    updatedAt: '2026-02-12T14:20:00.000Z',
    name: 'billing-manager',
    description: 'Custom role for managing billing and invoices.',
    type: 'custom',
@@ -47,8 +47,8 @@ export const customRoles: AuthtypesRoleDTO[] = [
  },
  {
    id: '019c24aa-3333-0002-bbbb-222222222222',
    createdAt: new Date('2026-02-11T09:00:00.000Z'),
    updatedAt: new Date('2026-02-13T11:45:00.000Z'),
    createdAt: '2026-02-11T09:00:00.000Z',
    updatedAt: '2026-02-13T11:45:00.000Z',
    name: 'dashboard-creator',
    description: 'Custom role allowing users to create and manage dashboards.',
    type: 'custom',
@@ -181,5 +181,24 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
    return err
  }

  if err := router.Handle("/api/v2/infra_monitoring/daemonsets", handler.New(
    provider.authzMiddleware.ViewAccess(provider.infraMonitoringHandler.ListDaemonSets),
    handler.OpenAPIDef{
      ID:          "ListDaemonSets",
      Tags:        []string{"inframonitoring"},
      Summary:     "List DaemonSets for Infra Monitoring",
      Description: "Returns a paginated list of Kubernetes DaemonSets with key aggregated pod metrics: CPU usage and memory working set summed across pods owned by the daemonset, plus average CPU/memory request and limit utilization (daemonSetCPURequest, daemonSetCPULimit, daemonSetMemoryRequest, daemonSetMemoryLimit). Each row also reports the latest known node-level counters from kube-state-metrics: desiredNodes (k8s.daemonset.desired_scheduled_nodes, the number of nodes the daemonset wants to run on) and currentNodes (k8s.daemonset.current_scheduled_nodes, the number of nodes the daemonset currently runs on) — note these are node counts, not pod counts. It also reports per-group podCountsByPhase ({ pending, running, succeeded, failed, unknown } from each pod's latest k8s.pod.phase value). Each daemonset includes metadata attributes (k8s.daemonset.name, k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.daemonset.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods owned by daemonsets in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_request / cpu_limit / memory / memory_request / memory_limit / desired_nodes / current_nodes, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (daemonSetCPU, daemonSetCPURequest, daemonSetCPULimit, daemonSetMemory, daemonSetMemoryRequest, daemonSetMemoryLimit, desiredNodes, currentNodes) return -1 as a sentinel when no data is available for that field.",
      Request:             new(inframonitoringtypes.PostableDaemonSets),
      RequestContentType:  "application/json",
      Response:            new(inframonitoringtypes.DaemonSets),
      ResponseContentType: "application/json",
      SuccessStatusCode:   http.StatusOK,
      ErrorStatusCodes:    []int{http.StatusBadRequest, http.StatusUnauthorized},
      Deprecated:          false,
      SecuritySchemes:     newSecuritySchemes(types.RoleViewer),
    })).Methods(http.MethodPost).GetError(); err != nil {
    return err
  }

  return nil
}
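For orientation, a minimal caller-side sketch of the request this route accepts, built only from the PostableDaemonSets fields this diff actually reads (Start, End, Filter, GroupBy, OrderBy, Offset, Limit); the literal values are illustrative and other fields and validation rules are not shown:

  // Sketch only: build a daemonsets list request and call the module directly.
  // `mod` stands for any inframonitoring.Module implementation; Start/End are unix millis.
  req := &inframonitoringtypes.PostableDaemonSets{
    Start:  1747947419000,
    End:    1747983448000,
    Filter: &qbtypes.Filter{Expression: "k8s.cluster.name = 'prod'"},
    Offset: 0,
    Limit:  10,
  }
  // With OrderBy left nil and GroupBy empty, ListDaemonSets (further below)
  // defaults to ordering by CPU descending and grouping by k8s.daemonset.name,
  // so the response type is "list".
  result, err := mod.ListDaemonSets(ctx, orgID, req)
  if err != nil {
    // handle error
  }
  _ = result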
148  pkg/modules/inframonitoring/implinframonitoring/daemonsets.go  Normal file
@@ -0,0 +1,148 @@
package implinframonitoring

import (
  "context"
  "slices"

  "github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
  qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
  "github.com/SigNoz/signoz/pkg/valuer"
)

// buildDaemonSetRecords assembles the page records. Pod phase counts come from
// phaseCounts in both modes; every row is a group of pods (one daemonset in
// list mode, an arbitrary roll-up in grouped_list mode), so there's no
// per-row "current phase" concept.
func buildDaemonSetRecords(
  resp *qbtypes.QueryRangeResponse,
  pageGroups []map[string]string,
  groupBy []qbtypes.GroupByKey,
  metadataMap map[string]map[string]string,
  phaseCounts map[string]podPhaseCounts,
) []inframonitoringtypes.DaemonSetRecord {
  metricsMap := parseFullQueryResponse(resp, groupBy)

  records := make([]inframonitoringtypes.DaemonSetRecord, 0, len(pageGroups))
  for _, labels := range pageGroups {
    compositeKey := compositeKeyFromLabels(labels, groupBy)
    daemonSetName := labels[daemonSetNameAttrKey]

    record := inframonitoringtypes.DaemonSetRecord{ // initialize with default values
      DaemonSetName:          daemonSetName,
      DaemonSetCPU:           -1,
      DaemonSetCPURequest:    -1,
      DaemonSetCPULimit:      -1,
      DaemonSetMemory:        -1,
      DaemonSetMemoryRequest: -1,
      DaemonSetMemoryLimit:   -1,
      DesiredNodes:           -1,
      CurrentNodes:           -1,
      Meta:                   map[string]string{},
    }

    if metrics, ok := metricsMap[compositeKey]; ok {
      if v, exists := metrics["A"]; exists {
        record.DaemonSetCPU = v
      }
      if v, exists := metrics["B"]; exists {
        record.DaemonSetCPURequest = v
      }
      if v, exists := metrics["C"]; exists {
        record.DaemonSetCPULimit = v
      }
      if v, exists := metrics["D"]; exists {
        record.DaemonSetMemory = v
      }
      if v, exists := metrics["E"]; exists {
        record.DaemonSetMemoryRequest = v
      }
      if v, exists := metrics["F"]; exists {
        record.DaemonSetMemoryLimit = v
      }
      if v, exists := metrics["H"]; exists {
        record.DesiredNodes = int(v)
      }
      if v, exists := metrics["I"]; exists {
        record.CurrentNodes = int(v)
      }
    }

    if phaseCountsForGroup, ok := phaseCounts[compositeKey]; ok {
      record.PodCountsByPhase = inframonitoringtypes.PodCountsByPhase{
        Pending:   phaseCountsForGroup.Pending,
        Running:   phaseCountsForGroup.Running,
        Succeeded: phaseCountsForGroup.Succeeded,
        Failed:    phaseCountsForGroup.Failed,
        Unknown:   phaseCountsForGroup.Unknown,
      }
    }

    if attrs, ok := metadataMap[compositeKey]; ok {
      for k, v := range attrs {
        record.Meta[k] = v
      }
    }

    records = append(records, record)
  }
  return records
}

func (m *module) getTopDaemonSetGroups(
  ctx context.Context,
  orgID valuer.UUID,
  req *inframonitoringtypes.PostableDaemonSets,
  metadataMap map[string]map[string]string,
) ([]map[string]string, error) {
  orderByKey := req.OrderBy.Key.Name
  queryNamesForOrderBy := orderByToDaemonSetsQueryNames[orderByKey]
  rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]

  topReq := &qbtypes.QueryRangeRequest{
    Start:       uint64(req.Start),
    End:         uint64(req.End),
    RequestType: qbtypes.RequestTypeScalar,
    CompositeQuery: qbtypes.CompositeQuery{
      Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
    },
  }

  for _, envelope := range m.newDaemonSetsTableListQuery().CompositeQuery.Queries {
    if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
      continue
    }
    copied := envelope
    if copied.Type == qbtypes.QueryTypeBuilder {
      existingExpr := ""
      if f := copied.GetFilter(); f != nil {
        existingExpr = f.Expression
      }
      reqFilterExpr := ""
      if req.Filter != nil {
        reqFilterExpr = req.Filter.Expression
      }
      merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
      copied.SetFilter(&qbtypes.Filter{Expression: merged})
      copied.SetGroupBy(req.GroupBy)
    }
    topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
  }

  resp, err := m.querier.QueryRange(ctx, orgID, topReq)
  if err != nil {
    return nil, err
  }

  allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
  return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
}

func (m *module) getDaemonSetsTableMetadata(ctx context.Context, req *inframonitoringtypes.PostableDaemonSets) (map[string]map[string]string, error) {
  var nonGroupByAttrs []string
  for _, key := range daemonSetAttrKeysForMetadata {
    if !isKeyInGroupByAttrs(req.GroupBy, key) {
      nonGroupByAttrs = append(nonGroupByAttrs, key)
    }
  }
  return m.getMetadata(ctx, daemonSetsTableMetricNamesList, req.GroupBy, nonGroupByAttrs, req.Filter, req.Start, req.End)
}
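A worked example of the filter merge in getTopDaemonSetGroups above. The output format of mergeFilterExpressions is not shown anywhere in this diff, so the combined shape in the comment is an assumption (two non-empty expressions joined with AND):

  // Hypothetical values; only the merge call itself is taken from the code above.
  existingExpr := "k8s.daemonset.name != ''"     // base filter baked into each builder query
  reqFilterExpr := "k8s.namespace.name = 'prod'" // user-supplied filter from the request
  merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
  // assumed shape: (k8s.daemonset.name != '') AND (k8s.namespace.name = 'prod')
  _ = merged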
@@ -0,0 +1,237 @@
package implinframonitoring

import (
  "github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
  "github.com/SigNoz/signoz/pkg/types/metrictypes"
  qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
  "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

const (
  daemonSetNameAttrKey     = "k8s.daemonset.name"
  daemonSetsBaseFilterExpr = "k8s.daemonset.name != ''"
)

var daemonSetNameGroupByKey = qbtypes.GroupByKey{
  TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
    Name:          daemonSetNameAttrKey,
    FieldContext:  telemetrytypes.FieldContextResource,
    FieldDataType: telemetrytypes.FieldDataTypeString,
  },
}

// daemonSetsTableMetricNamesList drives the existence/retention check.
// Includes k8s.pod.phase even though phase isn't part of the QB composite query —
// it is queried separately via getPerGroupPodPhaseCounts, and we want the
// response to short-circuit cleanly when the phase metric is absent.
var daemonSetsTableMetricNamesList = []string{
  "k8s.pod.phase",
  "k8s.pod.cpu.usage",
  "k8s.pod.cpu_request_utilization",
  "k8s.pod.cpu_limit_utilization",
  "k8s.pod.memory.working_set",
  "k8s.pod.memory_request_utilization",
  "k8s.pod.memory_limit_utilization",
  "k8s.daemonset.desired_scheduled_nodes",
  "k8s.daemonset.current_scheduled_nodes",
}

// Carried forward from v1 daemonSetAttrsToEnrich
// (pkg/query-service/app/inframetrics/daemonsets.go:29-33).
var daemonSetAttrKeysForMetadata = []string{
  "k8s.daemonset.name",
  "k8s.namespace.name",
  "k8s.cluster.name",
}

// orderByToDaemonSetsQueryNames maps the orderBy column to the query name
// used for ranking daemonset groups. v2 B/C/E/F are direct metrics, no
// formula deps — so unlike v1 we don't carry A/D.
var orderByToDaemonSetsQueryNames = map[string][]string{
  inframonitoringtypes.DaemonSetsOrderByCPU:           {"A"},
  inframonitoringtypes.DaemonSetsOrderByCPURequest:    {"B"},
  inframonitoringtypes.DaemonSetsOrderByCPULimit:      {"C"},
  inframonitoringtypes.DaemonSetsOrderByMemory:        {"D"},
  inframonitoringtypes.DaemonSetsOrderByMemoryRequest: {"E"},
  inframonitoringtypes.DaemonSetsOrderByMemoryLimit:   {"F"},
  inframonitoringtypes.DaemonSetsOrderByDesiredNodes:  {"H"},
  inframonitoringtypes.DaemonSetsOrderByCurrentNodes:  {"I"},
}

// newDaemonSetsTableListQuery builds the composite QB v5 request for the daemonsets list.
// Eight builder queries: A..F roll up pod-level metrics by daemonset, H/I take the
// latest daemonset-level desired/current scheduled-node counts. Restarts (v1 query G)
// is intentionally omitted to match the v2 pods/deployments/statefulsets/jobs pattern.
//
// Every builder query carries the base filter `daemonSetsBaseFilterExpr`. Reason:
// pod-level metrics (A..F) are emitted for every pod regardless of whether the
// pod belongs to a DaemonSet; only DaemonSet-owned pods carry the
// `k8s.daemonset.name` resource attribute. Without this filter, standalone pods
// and pods owned by other workloads (Deployment/StatefulSet/Job/...) collapse into
// a single empty-string group under the default groupBy. v1's GetDaemonSetList
// applied the same filter via FilterOperatorExists.
func (m *module) newDaemonSetsTableListQuery() *qbtypes.QueryRangeRequest {
  queries := []qbtypes.QueryEnvelope{
    // Query A: k8s.pod.cpu.usage — sum of pod CPU within the group.
    {
      Type: qbtypes.QueryTypeBuilder,
      Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Name:   "A",
        Signal: telemetrytypes.SignalMetrics,
        Aggregations: []qbtypes.MetricAggregation{
          {
            MetricName:       "k8s.pod.cpu.usage",
            TimeAggregation:  metrictypes.TimeAggregationAvg,
            SpaceAggregation: metrictypes.SpaceAggregationSum,
            ReduceTo:         qbtypes.ReduceToAvg,
          },
        },
        Filter:   &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
        GroupBy:  []qbtypes.GroupByKey{daemonSetNameGroupByKey},
        Disabled: false,
      },
    },
    // Query B: k8s.pod.cpu_request_utilization — avg across pods in the group.
    {
      Type: qbtypes.QueryTypeBuilder,
      Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Name:   "B",
        Signal: telemetrytypes.SignalMetrics,
        Aggregations: []qbtypes.MetricAggregation{
          {
            MetricName:       "k8s.pod.cpu_request_utilization",
            TimeAggregation:  metrictypes.TimeAggregationAvg,
            SpaceAggregation: metrictypes.SpaceAggregationAvg,
            ReduceTo:         qbtypes.ReduceToAvg,
          },
        },
        Filter:   &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
        GroupBy:  []qbtypes.GroupByKey{daemonSetNameGroupByKey},
        Disabled: false,
      },
    },
    // Query C: k8s.pod.cpu_limit_utilization — avg across pods in the group.
    {
      Type: qbtypes.QueryTypeBuilder,
      Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Name:   "C",
        Signal: telemetrytypes.SignalMetrics,
        Aggregations: []qbtypes.MetricAggregation{
          {
            MetricName:       "k8s.pod.cpu_limit_utilization",
            TimeAggregation:  metrictypes.TimeAggregationAvg,
            SpaceAggregation: metrictypes.SpaceAggregationAvg,
            ReduceTo:         qbtypes.ReduceToAvg,
          },
        },
        Filter:   &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
        GroupBy:  []qbtypes.GroupByKey{daemonSetNameGroupByKey},
        Disabled: false,
      },
    },
    // Query D: k8s.pod.memory.working_set — sum of pod memory within the group.
    {
      Type: qbtypes.QueryTypeBuilder,
      Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Name:   "D",
        Signal: telemetrytypes.SignalMetrics,
        Aggregations: []qbtypes.MetricAggregation{
          {
            MetricName:       "k8s.pod.memory.working_set",
            TimeAggregation:  metrictypes.TimeAggregationAvg,
            SpaceAggregation: metrictypes.SpaceAggregationSum,
            ReduceTo:         qbtypes.ReduceToAvg,
          },
        },
        Filter:   &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
        GroupBy:  []qbtypes.GroupByKey{daemonSetNameGroupByKey},
        Disabled: false,
      },
    },
    // Query E: k8s.pod.memory_request_utilization — avg across pods in the group.
    {
      Type: qbtypes.QueryTypeBuilder,
      Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Name:   "E",
        Signal: telemetrytypes.SignalMetrics,
        Aggregations: []qbtypes.MetricAggregation{
          {
            MetricName:       "k8s.pod.memory_request_utilization",
            TimeAggregation:  metrictypes.TimeAggregationAvg,
            SpaceAggregation: metrictypes.SpaceAggregationAvg,
            ReduceTo:         qbtypes.ReduceToAvg,
          },
        },
        Filter:   &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
        GroupBy:  []qbtypes.GroupByKey{daemonSetNameGroupByKey},
        Disabled: false,
      },
    },
    // Query F: k8s.pod.memory_limit_utilization — avg across pods in the group.
    {
      Type: qbtypes.QueryTypeBuilder,
      Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Name:   "F",
        Signal: telemetrytypes.SignalMetrics,
        Aggregations: []qbtypes.MetricAggregation{
          {
            MetricName:       "k8s.pod.memory_limit_utilization",
            TimeAggregation:  metrictypes.TimeAggregationAvg,
            SpaceAggregation: metrictypes.SpaceAggregationAvg,
            ReduceTo:         qbtypes.ReduceToAvg,
          },
        },
        Filter:   &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
        GroupBy:  []qbtypes.GroupByKey{daemonSetNameGroupByKey},
        Disabled: false,
      },
    },
    // Query H: k8s.daemonset.desired_scheduled_nodes — latest known desired node count per group.
    // v1 used TimeAggregationAnyLast (v3) → mapped to TimeAggregationLatest in v5;
    // SpaceAggregationSum + ReduceToLast preserve v1's "latest, summed across the group".
    {
      Type: qbtypes.QueryTypeBuilder,
      Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Name:   "H",
        Signal: telemetrytypes.SignalMetrics,
        Aggregations: []qbtypes.MetricAggregation{
          {
            MetricName:       "k8s.daemonset.desired_scheduled_nodes",
            TimeAggregation:  metrictypes.TimeAggregationLatest,
            SpaceAggregation: metrictypes.SpaceAggregationSum,
            ReduceTo:         qbtypes.ReduceToLast,
          },
        },
        Filter:   &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
        GroupBy:  []qbtypes.GroupByKey{daemonSetNameGroupByKey},
        Disabled: false,
      },
    },
    // Query I: k8s.daemonset.current_scheduled_nodes — latest known currently scheduled node count per group.
    {
      Type: qbtypes.QueryTypeBuilder,
      Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
        Name:   "I",
        Signal: telemetrytypes.SignalMetrics,
        Aggregations: []qbtypes.MetricAggregation{
          {
            MetricName:       "k8s.daemonset.current_scheduled_nodes",
            TimeAggregation:  metrictypes.TimeAggregationLatest,
            SpaceAggregation: metrictypes.SpaceAggregationSum,
            ReduceTo:         qbtypes.ReduceToLast,
          },
        },
        Filter:   &qbtypes.Filter{Expression: daemonSetsBaseFilterExpr},
        GroupBy:  []qbtypes.GroupByKey{daemonSetNameGroupByKey},
        Disabled: false,
      },
    },
  }

  return &qbtypes.QueryRangeRequest{
    RequestType: qbtypes.RequestTypeScalar,
    CompositeQuery: qbtypes.CompositeQuery{
      Queries: queries,
    },
  }
}
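Reading query H as a pipeline may make the three aggregation knobs concrete; this is a sketch of the intended semantics described in the comment above, not of querier internals:

  // k8s.daemonset.desired_scheduled_nodes (query H), per group:
  //   TimeAggregationLatest: within each series, keep the most recent sample.
  //   SpaceAggregationSum:   across the series in a group, add those latest values.
  //   ReduceToLast:          when a series must collapse to one scalar, keep the last point.
  // So a custom groupBy that rolls up two daemonsets whose latest desired
  // node counts are 3 and 5 would report desiredNodes = 8 for that group.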
@@ -237,3 +237,27 @@ func (h *handler) ListJobs(rw http.ResponseWriter, req *http.Request) {

  render.Success(rw, http.StatusOK, result)
}

func (h *handler) ListDaemonSets(rw http.ResponseWriter, req *http.Request) {
  claims, err := authtypes.ClaimsFromContext(req.Context())
  if err != nil {
    render.Error(rw, err)
    return
  }

  orgID := valuer.MustNewUUID(claims.OrgID)

  var parsedReq inframonitoringtypes.PostableDaemonSets
  if err := binding.JSON.BindBody(req.Body, &parsedReq); err != nil {
    render.Error(rw, err)
    return
  }

  result, err := h.module.ListDaemonSets(req.Context(), orgID, &parsedReq)
  if err != nil {
    render.Error(rw, err)
    return
  }

  render.Success(rw, http.StatusOK, result)
}
@@ -900,3 +900,100 @@ func (m *module) ListJobs(ctx context.Context, orgID valuer.UUID, req *inframoni

  return resp, nil
}

func (m *module) ListDaemonSets(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableDaemonSets) (*inframonitoringtypes.DaemonSets, error) {
  if err := req.Validate(); err != nil {
    return nil, err
  }

  resp := &inframonitoringtypes.DaemonSets{}

  if req.OrderBy == nil {
    req.OrderBy = &qbtypes.OrderBy{
      Key: qbtypes.OrderByKey{
        TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
          Name: inframonitoringtypes.DaemonSetsOrderByCPU,
        },
      },
      Direction: qbtypes.OrderDirectionDesc,
    }
  }

  if len(req.GroupBy) == 0 {
    req.GroupBy = []qbtypes.GroupByKey{daemonSetNameGroupByKey}
    resp.Type = inframonitoringtypes.ResponseTypeList
  } else {
    resp.Type = inframonitoringtypes.ResponseTypeGroupedList
  }

  // Bake the workload base filter into req.Filter so all downstream helpers pick it up.
  if req.Filter == nil {
    req.Filter = &qbtypes.Filter{}
  }
  req.Filter.Expression = mergeFilterExpressions(daemonSetsBaseFilterExpr, req.Filter.Expression)

  missingMetrics, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, daemonSetsTableMetricNamesList)
  if err != nil {
    return nil, err
  }
  if len(missingMetrics) > 0 {
    resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: missingMetrics}
    resp.Records = []inframonitoringtypes.DaemonSetRecord{}
    resp.Total = 0
    return resp, nil
  }
  if req.End < int64(minFirstReportedUnixMilli) {
    resp.EndTimeBeforeRetention = true
    resp.Records = []inframonitoringtypes.DaemonSetRecord{}
    resp.Total = 0
    return resp, nil
  }
  resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: []string{}}

  metadataMap, err := m.getDaemonSetsTableMetadata(ctx, req)
  if err != nil {
    return nil, err
  }

  resp.Total = len(metadataMap)

  pageGroups, err := m.getTopDaemonSetGroups(ctx, orgID, req, metadataMap)
  if err != nil {
    return nil, err
  }

  if len(pageGroups) == 0 {
    resp.Records = []inframonitoringtypes.DaemonSetRecord{}
    return resp, nil
  }

  filterExpr := ""
  if req.Filter != nil {
    filterExpr = req.Filter.Expression
  }

  fullQueryReq := buildFullQueryRequest(req.Start, req.End, filterExpr, req.GroupBy, pageGroups, m.newDaemonSetsTableListQuery())
  queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
  if err != nil {
    return nil, err
  }

  // Reuse the pods phase-counts CTE function via a temp struct — it reads only
  // Start/End/Filter/GroupBy from PostablePods. Pods owned by a DaemonSet carry
  // k8s.daemonset.name as a resource attribute, so default-groupBy gives
  // per-daemonset phase counts automatically.
  phaseCounts, err := m.getPerGroupPodPhaseCounts(ctx, &inframonitoringtypes.PostablePods{
    Start:   req.Start,
    End:     req.End,
    Filter:  req.Filter,
    GroupBy: req.GroupBy,
  }, pageGroups)
  if err != nil {
    return nil, err
  }

  resp.Records = buildDaemonSetRecords(queryResp, pageGroups, req.GroupBy, metadataMap, phaseCounts)
  resp.Warning = queryResp.Warning

  return resp, nil
}
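On the consumer side, the -1 sentinels documented in the route description mean "no data for this field in the window", so readers of the records should treat negative metric fields as absent rather than measured. A minimal sketch (assumes "fmt" is imported; `resp` is the value returned by ListDaemonSets above):

  for _, rec := range resp.Records {
    if rec.DaemonSetCPU < 0 {
      continue // no CPU samples for this group in the selected window
    }
    fmt.Printf("%s cpu=%.3f desired=%d current=%d\n",
      rec.DaemonSetName, rec.DaemonSetCPU, rec.DesiredNodes, rec.CurrentNodes)
  }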
@@ -18,6 +18,7 @@ type Handler interface {
  ListDeployments(http.ResponseWriter, *http.Request)
  ListStatefulSets(http.ResponseWriter, *http.Request)
  ListJobs(http.ResponseWriter, *http.Request)
  ListDaemonSets(http.ResponseWriter, *http.Request)
}

type Module interface {
@@ -30,4 +31,5 @@ type Module interface {
  ListDeployments(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableDeployments) (*inframonitoringtypes.Deployments, error)
  ListStatefulSets(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableStatefulSets) (*inframonitoringtypes.StatefulSets, error)
  ListJobs(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableJobs) (*inframonitoringtypes.Jobs, error)
  ListDaemonSets(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableDaemonSets) (*inframonitoringtypes.DaemonSets, error)
}
@@ -202,6 +202,7 @@ func NewSQLMigrationProviderFactories(
    sqlmigration.NewAddLLMPricingRulesFactory(sqlstore, sqlschema),
    sqlmigration.NewMigrateMetaresourcesTuplesFactory(sqlstore),
    sqlmigration.NewAddTagsFactory(sqlstore, sqlschema),
    sqlmigration.NewAddRoleCRUDTuplesFactory(sqlstore),
  )
}
139  pkg/sqlmigration/083_add_role_crud_tuples.go  Normal file
@@ -0,0 +1,139 @@
package sqlmigration

import (
  "context"
  "time"

  "github.com/SigNoz/signoz/pkg/factory"
  "github.com/SigNoz/signoz/pkg/sqlstore"
  "github.com/SigNoz/signoz/pkg/types/authtypes"
  "github.com/oklog/ulid/v2"
  "github.com/uptrace/bun"
  "github.com/uptrace/bun/dialect"
  "github.com/uptrace/bun/migrate"
)

type addRoleCRUDTuples struct {
  sqlstore sqlstore.SQLStore
}

func NewAddRoleCRUDTuplesFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
  return factory.NewProviderFactory(factory.MustNewName("add_role_crud_tuples"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
    return &addRoleCRUDTuples{sqlstore: sqlstore}, nil
  })
}

func (migration *addRoleCRUDTuples) Register(migrations *migrate.Migrations) error {
  return migrations.Register(migration.Up, migration.Down)
}

func (migration *addRoleCRUDTuples) Up(ctx context.Context, db *bun.DB) error {
  tx, err := db.BeginTx(ctx, nil)
  if err != nil {
    return err
  }
  defer func() { _ = tx.Rollback() }()

  var storeID string
  err = tx.QueryRowContext(ctx, `SELECT id FROM store WHERE name = ? LIMIT 1`, "signoz").Scan(&storeID)
  if err != nil {
    return err
  }

  var orgIDs []string
  rows, err := tx.QueryContext(ctx, `SELECT id FROM organizations`)
  if err != nil {
    return err
  }
  defer rows.Close()
  for rows.Next() {
    var orgID string
    if err := rows.Scan(&orgID); err != nil {
      return err
    }
    orgIDs = append(orgIDs, orgID)
  }

  isPG := migration.sqlstore.BunDB().Dialect().Name() == dialect.PG

  // Migration 081 moved role tuples from "metaresources" to "role" type but
  // only inserted create and list. The read, update, and delete tuples were
  // lost in the migration. Re-add them here.
  tuples := []migrationTuple{
    {authtypes.SigNozAdminRoleName, "role", "role", "read"},
    {authtypes.SigNozAdminRoleName, "role", "role", "update"},
    {authtypes.SigNozAdminRoleName, "role", "role", "delete"},
  }

  for _, orgID := range orgIDs {
    for _, tuple := range tuples {
      entropy := ulid.DefaultEntropy()
      now := time.Now().UTC()
      tupleID := ulid.MustNew(ulid.Timestamp(now), entropy).String()

      objectID := "organization/" + orgID + "/" + tuple.objectName + "/*"
      roleSubject := "organization/" + orgID + "/role/" + tuple.roleName

      if isPG {
        user := "role:" + roleSubject + "#assignee"
        result, err := tx.ExecContext(ctx, `
          INSERT INTO tuple (store, object_type, object_id, relation, _user, user_type, ulid, inserted_at)
          VALUES (?, ?, ?, ?, ?, ?, ?, ?)
          ON CONFLICT (store, object_type, object_id, relation, _user) DO NOTHING`,
          storeID, tuple.objectType, objectID, tuple.relation, user, "userset", tupleID, now,
        )
        if err != nil {
          return err
        }
        rowsAffected, err := result.RowsAffected()
        if err != nil {
          return err
        }
        if rowsAffected == 0 {
          continue
        }
        _, err = tx.ExecContext(ctx, `
          INSERT INTO changelog (store, object_type, object_id, relation, _user, operation, ulid, inserted_at)
          VALUES (?, ?, ?, ?, ?, ?, ?, ?)
          ON CONFLICT (store, ulid, object_type) DO NOTHING`,
          storeID, tuple.objectType, objectID, tuple.relation, user, "TUPLE_OPERATION_WRITE", tupleID, now,
        )
        if err != nil {
          return err
        }
      } else {
        result, err := tx.ExecContext(ctx, `
          INSERT INTO tuple (store, object_type, object_id, relation, user_object_type, user_object_id, user_relation, user_type, ulid, inserted_at)
          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
          ON CONFLICT (store, object_type, object_id, relation, user_object_type, user_object_id, user_relation) DO NOTHING`,
          storeID, tuple.objectType, objectID, tuple.relation, "role", roleSubject, "assignee", "userset", tupleID, now,
        )
        if err != nil {
          return err
        }
        rowsAffected, err := result.RowsAffected()
        if err != nil {
          return err
        }
        if rowsAffected == 0 {
          continue
        }
        _, err = tx.ExecContext(ctx, `
          INSERT INTO changelog (store, object_type, object_id, relation, user_object_type, user_object_id, user_relation, operation, ulid, inserted_at)
          VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
          ON CONFLICT (store, ulid, object_type) DO NOTHING`,
          storeID, tuple.objectType, objectID, tuple.relation, "role", roleSubject, "assignee", 0, tupleID, now,
        )
        if err != nil {
          return err
        }
      }
    }
  }

  return tx.Commit()
}

func (migration *addRoleCRUDTuples) Down(context.Context, *bun.DB) error {
  return nil
}
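Concretely, assuming authtypes.SigNozAdminRoleName resolves to "signoz-admin" (matching the managed-role fixtures above), one iteration of the loop writes a tuple shaped like this; the org ID is illustrative and the ULID/timestamp are generated per tuple:

  // For orgID "0193..." and tuple {signoz-admin, role, role, "read"}:
  //   objectID    = "organization/0193.../role/*"
  //   roleSubject = "organization/0193.../role/signoz-admin"
  // Postgres stores the subject inline as _user = "role:" + roleSubject + "#assignee",
  // while the non-PG branch splits it into user_object_type/user_object_id/user_relation.
  // ON CONFLICT ... DO NOTHING keeps reruns idempotent, and the changelog insert is
  // skipped whenever rowsAffected == 0, i.e. the tuple already existed.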
@@ -41,7 +41,7 @@ func (c *conditionBuilder) conditionFor(
  // TODO(Piyush): Update this to support multiple JSON columns based on evolutions
  for _, column := range columns {
    // TODO(Tushar): thread orgID here to evaluate correctly
    if column.Type.GetType() == schema.ColumnTypeEnumJSON && c.fl.BooleanOrEmpty(ctx, flagger.FeatureUseJSONBody, featuretypes.NewFlaggerEvaluationContext(valuer.UUID{})) && key.Name != messageSubField {
    if column.Type.GetType() == schema.ColumnTypeEnumJSON && key.FieldContext == telemetrytypes.FieldContextBody && c.fl.BooleanOrEmpty(ctx, flagger.FeatureUseJSONBody, featuretypes.NewFlaggerEvaluationContext(valuer.UUID{})) && key.Name != messageSubField {
      valueType, value := InferDataType(value, operator, key)
      cond, err := NewJSONConditionBuilder(key, valueType).buildJSONCondition(operator, value, sb)
      if err != nil {
@@ -33,7 +33,7 @@ func (t TestExpected) GetQuery() string {
|
||||
}
|
||||
|
||||
func TestJSONStmtBuilder_TimeSeries(t *testing.T) {
|
||||
statementBuilder := buildJSONTestStatementBuilder(t, false)
|
||||
statementBuilder, _ := buildJSONTestStatementBuilder(t, false)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
@@ -171,7 +171,7 @@ func TestStmtBuilderTimeSeriesBodyGroupByPromoted(t *testing.T) {
|
||||
*/
|
||||
|
||||
func TestJSONStmtBuilder_PrimitivePaths(t *testing.T) {
|
||||
statementBuilder := buildJSONTestStatementBuilder(t, false)
|
||||
statementBuilder, _ := buildJSONTestStatementBuilder(t, false)
|
||||
cases := []struct {
|
||||
name string
|
||||
filter string
|
||||
@@ -494,7 +494,7 @@ func TestStatementBuilderListQueryBodyPromoted(t *testing.T) {
|
||||
*/
|
||||
|
||||
func TestJSONStmtBuilder_ArrayPaths(t *testing.T) {
|
||||
statementBuilder := buildJSONTestStatementBuilder(t, false)
|
||||
statementBuilder, _ := buildJSONTestStatementBuilder(t, false)
|
||||
cases := []struct {
|
||||
name string
|
||||
filter string
|
||||
@@ -799,7 +799,7 @@ func TestJSONStmtBuilder_ArrayPaths(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestJSONStmtBuilder_IndexedPaths(t *testing.T) {
|
||||
statementBuilder := buildJSONTestStatementBuilder(t, true)
|
||||
statementBuilder, _ := buildJSONTestStatementBuilder(t, true)
|
||||
cases := []struct {
|
||||
name string
|
||||
query qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]
|
||||
@@ -918,7 +918,7 @@ func TestJSONStmtBuilder_IndexedPaths(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestJSONStmtBuilder_SelectField(t *testing.T) {
|
||||
statementBuilder := buildJSONTestStatementBuilder(t, false)
|
||||
statementBuilder, _ := buildJSONTestStatementBuilder(t, false)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
@@ -1006,7 +1006,7 @@ func TestJSONStmtBuilder_SelectField(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestJSONStmtBuilder_OrderBy(t *testing.T) {
|
||||
statementBuilder := buildJSONTestStatementBuilder(t, false)
|
||||
statementBuilder, _ := buildJSONTestStatementBuilder(t, false)
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
@@ -1082,6 +1082,69 @@ func TestJSONStmtBuilder_OrderBy(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceAggrAndGroupBy_WithJSONEnabled(t *testing.T) {
	statementBuilder, metadataStore := buildJSONTestStatementBuilder(t, false)
	releaseTime := time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC)
	keysMap := buildCompleteFieldKeyMap(releaseTime)
	for _, keys := range keysMap {
		for _, key := range keys {
			metadataStore.SetKey(key)
		}
	}

	cases := []struct {
		name                string
		requestType         qbtypes.RequestType
		query               qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]
		expected            qbtypes.Statement
		expectedErrContains string
	}{
		{
			name:        "resource_aggregation_and_group_by_with_json_enabled",
			requestType: qbtypes.RequestTypeTimeSeries,
			query: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
				Signal:       telemetrytypes.SignalLogs,
				StepInterval: qbtypes.Step{Duration: 30 * time.Second},
				GroupBy: []qbtypes.GroupByKey{
					{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: "region",
						},
					},
				},
				Filter: &qbtypes.Filter{
					Expression: "user.name exists",
				},
				Aggregations: []qbtypes.LogAggregation{
					{
						Expression: "count_distinct(service.name)",
					},
				},
			},
			expected: qbtypes.Statement{
				Query:    "SELECT toStartOfInterval(fromUnixTimestamp64Nano(timestamp), INTERVAL 30 SECOND) AS ts, toString(multiIf(resource.`region`::String IS NOT NULL, resource.`region`::String, NULL)) AS `region`, countDistinct(multiIf(resource.`service.name`::String IS NOT NULL, resource.`service.name`::String, NULL)) AS __result_0 FROM signoz_logs.distributed_logs_v2 WHERE ((dynamicElement(body_v2.`user.name`, 'String') IS NOT NULL) OR mapContains(attributes_string, 'user.name') = ?) AND timestamp >= ? AND ts_bucket_start >= ? AND timestamp < ? AND ts_bucket_start <= ? GROUP BY ts, `region`",
				Args:     []any{true, "1747947419000000000", uint64(1747945619), "1747983448000000000", uint64(1747983448)},
				Warnings: []string{"Key `user.name` is ambiguous, found 2 different combinations of field context / data type: [name=user.name,context=body,datatype=string name=user.name,context=attribute,datatype=string]."},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			q, err := statementBuilder.Build(context.Background(), 1747947419000, 1747983448000, c.requestType, c.query, nil)
			if c.expectedErrContains != "" {
				require.Error(t, err)
				require.Contains(t, err.Error(), c.expectedErrContains)
			} else {
				require.NoError(t, err)
				require.Equal(t, c.expected.Query, q.Query)
				require.Equal(t, c.expected.Args, q.Args)
				require.Equal(t, c.expected.Warnings, q.Warnings)
			}
		})
	}
}

func buildTestTelemetryMetadataStore(t *testing.T, addIndexes bool) *telemetrytypestest.MockMetadataStore {
	mockMetadataStore := telemetrytypestest.NewMockMetadataStore()
	mockMetadataStore.SetStaticFields(IntrinsicFields)
@@ -1123,7 +1186,7 @@ func buildTestTelemetryMetadataStore(t *testing.T, addIndexes bool) *telemetryty
	return mockMetadataStore
}

func buildJSONTestStatementBuilder(t *testing.T, addIndexes bool) *logQueryStatementBuilder {
func buildJSONTestStatementBuilder(t *testing.T, addIndexes bool) (*logQueryStatementBuilder, *telemetrytypestest.MockMetadataStore) {
	t.Helper()

	mockMetadataStore := buildTestTelemetryMetadataStore(t, addIndexes)
@@ -1144,5 +1207,5 @@ func buildJSONTestStatementBuilder(t *testing.T, addIndexes bool) *logQueryState
		fl,
	)

	return statementBuilder
	return statementBuilder, mockMetadataStore
}

@@ -25,7 +25,7 @@ type TransactionWithAuthorization struct {
}

func NewTransaction(relation Relation, object coretypes.Object) (*Transaction, error) {
	if err := coretypes.ErrIfVerbNotValidForType(relation.Verb, object.Resource.Type); err != nil {
	if err := coretypes.ErrIfVerbNotValidForResource(relation.Verb, object.Resource); err != nil {
		return nil, err
	}

@@ -129,13 +129,13 @@ func NewPatchableObjects(additions []*ObjectGroup, deletions []*ObjectGroup, ver
}

	for _, objectGroup := range additions {
		if err := ErrIfVerbNotValidForType(verb, objectGroup.Resource.Type); err != nil {
		if err := ErrIfVerbNotValidForResource(verb, objectGroup.Resource); err != nil {
			return nil, nil, err
		}
	}

	for _, objectGroup := range deletions {
		if err := ErrIfVerbNotValidForType(verb, objectGroup.Resource.Type); err != nil {
		if err := ErrIfVerbNotValidForResource(verb, objectGroup.Resource); err != nil {
			return nil, nil, err
		}
	}

@@ -68,7 +68,7 @@ var (
	ResourceMetaResourceSavedView      = NewResourceMetaResource(KindSavedView)
	ResourceMetaResourceTraceFunnel    = NewResourceMetaResource(KindTraceFunnel)
	ResourceMetaResourceFactorPassword = NewResourceMetaResource(KindFactorPassword)
	ResourceMetaResourceFactorAPIKey   = NewResourceMetaResource(KindFactorAPIKey)
	ResourceMetaResourceFactorAPIKey   = NewResourceMetaResource(KindFactorAPIKey, VerbCreate, VerbList, VerbRead, VerbUpdate, VerbDelete)
	ResourceMetaResourceLicense        = NewResourceMetaResource(KindLicense)
	ResourceMetaResourceSubscription   = NewResourceMetaResource(KindSubscription)
	ResourceTelemetryResourceLogs      = NewResourceTelemetryResource(KindLogs)

@@ -21,7 +21,7 @@ var (
	TypeServiceAccount = Type{valuer.NewString("serviceaccount"), regexp.MustCompile(`^(^[0-9a-f]{8}(?:\-[0-9a-f]{4}){3}-[0-9a-f]{12}$|\*)$`), []Verb{VerbCreate, VerbList, VerbRead, VerbUpdate, VerbDelete, VerbAttach, VerbDetach}}
	TypeAnonymous = Type{valuer.NewString("anonymous"), regexp.MustCompile(`^\*$`), []Verb{}}
	TypeRole = Type{valuer.NewString("role"), regexp.MustCompile(`^([a-z-]{1,50}|\*)$`), []Verb{VerbAssignee, VerbCreate, VerbList, VerbRead, VerbUpdate, VerbDelete, VerbAttach, VerbDetach}}
	TypeOrganization = Type{valuer.NewString("organization"), regexp.MustCompile(`^(^[0-9a-f]{8}(?:\-[0-9a-f]{4}){3}-[0-9a-f]{12}$|\*)$`), []Verb{VerbRead, VerbUpdate, VerbDelete}}
	TypeOrganization = Type{valuer.NewString("organization"), regexp.MustCompile(`^(^[0-9a-f]{8}(?:\-[0-9a-f]{4}){3}-[0-9a-f]{12}$|\*)$`), []Verb{VerbRead, VerbUpdate}}
	TypeMetaResource = Type{valuer.NewString("metaresource"), regexp.MustCompile(`^(^[0-9a-f]{8}(?:\-[0-9a-f]{4}){3}-[0-9a-f]{12}$|\*)$`), []Verb{VerbCreate, VerbList, VerbRead, VerbUpdate, VerbDelete, VerbAttach, VerbDetach}}
	TypeTelemetryResource = Type{valuer.NewString("telemetryresource"), regexp.MustCompile(`^\*$`), []Verb{VerbRead}}
)

@@ -19,6 +19,12 @@ type Resource interface {

	// Scope of the resource.
	Scope(verb Verb) string

	// AllowedVerbs returns the verbs that are valid for this resource.
	// By default, this delegates to the type's allowed verbs, but specific
	// resources can restrict the set further (e.g., some metaresource kinds
	// may not support attach/detach).
	AllowedVerbs() []Verb
}

type ResourceRef struct {

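As a consumer-side sketch (not part of the diff; hasVerb is a hypothetical helper), checking membership in a resource's advertised verb set looks like this, mirroring the loop that ErrIfVerbNotValidForResource uses later in this changeset:

// hasVerb reports whether the resource advertises the given verb.
func hasVerb(resource Resource, verb Verb) bool {
	for _, allowed := range resource.AllowedVerbs() {
		if verb == allowed {
			return true
		}
	}
	return false
}
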
@@ -34,3 +34,7 @@ func (resourceAnonymous *resourceAnonymous) Object(orgID valuer.UUID, selector s
func (resourceAnonymous *resourceAnonymous) Scope(verb Verb) string {
	return resourceAnonymous.Kind().String() + ":" + verb.StringValue()
}

func (*resourceAnonymous) AllowedVerbs() []Verb {
	return TypeAnonymous.AllowedVerbs()
}

@@ -5,11 +5,15 @@ import (
)

type resourceMetaResource struct {
	kind Kind
	kind         Kind
	allowedVerbs []Verb
}

func NewResourceMetaResource(kind Kind) Resource {
	return &resourceMetaResource{kind: kind}
func NewResourceMetaResource(kind Kind, allowedVerbs ...Verb) Resource {
	if len(allowedVerbs) == 0 {
		allowedVerbs = TypeMetaResource.AllowedVerbs()
	}
	return &resourceMetaResource{kind: kind, allowedVerbs: allowedVerbs}
}

func (*resourceMetaResource) Type() Type {
@@ -32,3 +36,7 @@ func (resourceMetaResource *resourceMetaResource) Object(orgID valuer.UUID, sele
func (resourceMetaResource *resourceMetaResource) Scope(verb Verb) string {
	return resourceMetaResource.Kind().String() + ":" + verb.StringValue()
}

func (resourceMetaResource *resourceMetaResource) AllowedVerbs() []Verb {
	return resourceMetaResource.allowedVerbs
}

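For clarity, a minimal sketch (illustrative only, not part of the diff) of the constructor's two modes: with no explicit verbs the metaresource inherits the full TypeMetaResource verb set, while explicit verbs narrow what the resource advertises.

// Default: saved views keep every verb TypeMetaResource allows.
savedView := NewResourceMetaResource(KindSavedView)
_ = savedView.AllowedVerbs() // TypeMetaResource.AllowedVerbs()

// Restricted: factorapikey advertises only the five verbs granted to it,
// so attach/detach are excluded even though the type permits them.
apiKey := NewResourceMetaResource(KindFactorAPIKey, VerbCreate, VerbList, VerbRead, VerbUpdate, VerbDelete)
_ = apiKey.AllowedVerbs() // only the verbs passed in
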
@@ -33,3 +33,7 @@ func (resourceOrganization *resourceOrganization) Object(orgID valuer.UUID, sele
func (resourceOrganization *resourceOrganization) Scope(verb Verb) string {
	return resourceOrganization.Kind().String() + ":" + verb.StringValue()
}

func (*resourceOrganization) AllowedVerbs() []Verb {
	return TypeOrganization.AllowedVerbs()
}

@@ -34,3 +34,7 @@ func (resourceRole *resourceRole) Object(orgID valuer.UUID, selector string) str
func (resourceRole *resourceRole) Scope(verb Verb) string {
	return resourceRole.Kind().String() + ":" + verb.StringValue()
}

func (*resourceRole) AllowedVerbs() []Verb {
	return TypeRole.AllowedVerbs()
}

@@ -34,3 +34,7 @@ func (resourceServiceAccount *resourceServiceAccount) Object(orgID valuer.UUID,
func (resourceServiceAccount *resourceServiceAccount) Scope(verb Verb) string {
	return resourceServiceAccount.Kind().String() + ":" + verb.StringValue()
}

func (*resourceServiceAccount) AllowedVerbs() []Verb {
	return TypeServiceAccount.AllowedVerbs()
}

@@ -32,3 +32,7 @@ func (resourceTelemetryResource *resourceTelemetryResource) Object(orgID valuer.
func (resourceTelemetryResource *resourceTelemetryResource) Scope(verb Verb) string {
	return resourceTelemetryResource.Kind().String() + ":" + verb.StringValue()
}

func (*resourceTelemetryResource) AllowedVerbs() []Verb {
	return TypeTelemetryResource.AllowedVerbs()
}

@@ -34,3 +34,7 @@ func (resourceUser *resourceUser) Object(orgID valuer.UUID, selector string) str
func (resourceUser *resourceUser) Scope(verb Verb) string {
	return resourceUser.Kind().String() + ":" + verb.StringValue()
}

func (*resourceUser) AllowedVerbs() []Verb {
	return TypeUser.AllowedVerbs()
}

@@ -6,7 +6,7 @@ type Transaction struct {
}

func NewTransaction(verb Verb, object Object) (*Transaction, error) {
	if err := ErrIfVerbNotValidForType(verb, object.Resource.Type); err != nil {
	if err := ErrIfVerbNotValidForResource(verb, object.Resource); err != nil {
		return nil, err
	}

@@ -56,6 +56,25 @@ func ErrIfVerbNotValidForType(verb Verb, typed Type) error {
	return nil
}

func ErrIfVerbNotValidForResource(verb Verb, ref ResourceRef) error {
	if err := ErrIfVerbNotValidForType(verb, ref.Type); err != nil {
		return err
	}

	resource, err := NewResourceFromTypeAndKind(ref.Type, ref.Kind)
	if err != nil {
		return err
	}

	for _, allowed := range resource.AllowedVerbs() {
		if verb == allowed {
			return nil
		}
	}

	return errors.Newf(errors.TypeInvalidInput, ErrCodeInvalidVerbForType, "verb %s is not valid for resource %s:%s", verb.StringValue(), ref.Type.StringValue(), ref.Kind.String())
}

func (typed *Type) UnmarshalJSON(data []byte) error {
	str := ""
	err := json.Unmarshal(data, &str)

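A short usage sketch (variable names illustrative; this assumes NewResourceRef yields the ResourceRef consumed here, as its use elsewhere in the repository suggests): the check first validates the verb against the type, then against the resource's possibly narrower verb set.

ref := NewResourceRef(ResourceMetaResourceFactorAPIKey)

// VerbRead is in factorapikey's explicit verb list, so this passes.
if err := ErrIfVerbNotValidForResource(VerbRead, ref); err != nil {
	// not reached for VerbRead
}

// VerbAttach is allowed by TypeMetaResource but was not granted to the
// factorapikey kind, so this returns the invalid-verb error above.
err := ErrIfVerbNotValidForResource(VerbAttach, ref)
_ = err
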
105
pkg/types/inframonitoringtypes/daemonsets.go
Normal file
@@ -0,0 +1,105 @@
package inframonitoringtypes

import (
	"encoding/json"
	"slices"

	"github.com/SigNoz/signoz/pkg/errors"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

type DaemonSets struct {
	Type                   ResponseType           `json:"type" required:"true"`
	Records                []DaemonSetRecord      `json:"records" required:"true"`
	Total                  int                    `json:"total" required:"true"`
	RequiredMetricsCheck   RequiredMetricsCheck   `json:"requiredMetricsCheck" required:"true"`
	EndTimeBeforeRetention bool                   `json:"endTimeBeforeRetention" required:"true"`
	Warning                *qbtypes.QueryWarnData `json:"warning,omitempty"`
}

type DaemonSetRecord struct {
	DaemonSetName          string            `json:"daemonSetName" required:"true"`
	DaemonSetCPU           float64           `json:"daemonSetCPU" required:"true"`
	DaemonSetCPURequest    float64           `json:"daemonSetCPURequest" required:"true"`
	DaemonSetCPULimit      float64           `json:"daemonSetCPULimit" required:"true"`
	DaemonSetMemory        float64           `json:"daemonSetMemory" required:"true"`
	DaemonSetMemoryRequest float64           `json:"daemonSetMemoryRequest" required:"true"`
	DaemonSetMemoryLimit   float64           `json:"daemonSetMemoryLimit" required:"true"`
	DesiredNodes           int               `json:"desiredNodes" required:"true"`
	CurrentNodes           int               `json:"currentNodes" required:"true"`
	PodCountsByPhase       PodCountsByPhase  `json:"podCountsByPhase" required:"true"`
	Meta                   map[string]string `json:"meta" required:"true"`
}

// PostableDaemonSets is the request body for the v2 daemonsets list API.
type PostableDaemonSets struct {
	Start   int64                `json:"start" required:"true"`
	End     int64                `json:"end" required:"true"`
	Filter  *qbtypes.Filter      `json:"filter"`
	GroupBy []qbtypes.GroupByKey `json:"groupBy"`
	OrderBy *qbtypes.OrderBy     `json:"orderBy"`
	Offset  int                  `json:"offset"`
	Limit   int                  `json:"limit" required:"true"`
}

// Validate ensures PostableDaemonSets contains acceptable values.
func (req *PostableDaemonSets) Validate() error {
	if req == nil {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
	}

	if req.Start <= 0 {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid start time %d: start must be greater than 0",
			req.Start,
		)
	}

	if req.End <= 0 {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid end time %d: end must be greater than 0",
			req.End,
		)
	}

	if req.Start >= req.End {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid time range: start (%d) must be less than end (%d)",
			req.Start,
			req.End,
		)
	}

	if req.Limit < 1 || req.Limit > 5000 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be between 1 and 5000")
	}

	if req.Offset < 0 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "offset cannot be negative")
	}

	if req.OrderBy != nil {
		if !slices.Contains(DaemonSetsValidOrderByKeys, req.OrderBy.Key.Name) {
			return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by key: %s", req.OrderBy.Key.Name)
		}
		if req.OrderBy.Direction != qbtypes.OrderDirectionAsc && req.OrderBy.Direction != qbtypes.OrderDirectionDesc {
			return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by direction: %s", req.OrderBy.Direction)
		}
	}

	return nil
}

// UnmarshalJSON validates input immediately after decoding.
func (req *PostableDaemonSets) UnmarshalJSON(data []byte) error {
	type raw PostableDaemonSets
	var decoded raw
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*req = PostableDaemonSets(decoded)
	return req.Validate()
}
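A small sketch of the decode-then-validate flow (payload values are illustrative): because UnmarshalJSON calls Validate after decoding, a malformed time range is rejected at decode time instead of deeper in the handler.

payload := []byte(`{"start": 2000, "end": 1000, "limit": 100}`)

var req PostableDaemonSets
// start >= end, so json.Unmarshal surfaces the TypeInvalidInput error from
// Validate: "invalid time range: start (2000) must be less than end (1000)".
if err := json.Unmarshal(payload, &req); err != nil {
	// reject the request here
}
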
23
pkg/types/inframonitoringtypes/daemonsets_constants.go
Normal file
@@ -0,0 +1,23 @@
package inframonitoringtypes

const (
	DaemonSetsOrderByCPU           = "cpu"
	DaemonSetsOrderByCPURequest    = "cpu_request"
	DaemonSetsOrderByCPULimit      = "cpu_limit"
	DaemonSetsOrderByMemory        = "memory"
	DaemonSetsOrderByMemoryRequest = "memory_request"
	DaemonSetsOrderByMemoryLimit   = "memory_limit"
	DaemonSetsOrderByDesiredNodes  = "desired_nodes"
	DaemonSetsOrderByCurrentNodes  = "current_nodes"
)

var DaemonSetsValidOrderByKeys = []string{
	DaemonSetsOrderByCPU,
	DaemonSetsOrderByCPURequest,
	DaemonSetsOrderByCPULimit,
	DaemonSetsOrderByMemory,
	DaemonSetsOrderByMemoryRequest,
	DaemonSetsOrderByMemoryLimit,
	DaemonSetsOrderByDesiredNodes,
	DaemonSetsOrderByCurrentNodes,
}
273
pkg/types/inframonitoringtypes/daemonsets_test.go
Normal file
@@ -0,0 +1,273 @@
package inframonitoringtypes

import (
	"testing"

	"github.com/SigNoz/signoz/pkg/errors"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/stretchr/testify/require"
)

func TestPostableDaemonSets_Validate(t *testing.T) {
	tests := []struct {
		name    string
		req     *PostableDaemonSets
		wantErr bool
	}{
		{
			name: "valid request",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: false,
		},
		{
			name:    "nil request",
			req:     nil,
			wantErr: true,
		},
		{
			name: "start time zero",
			req: &PostableDaemonSets{
				Start:  0,
				End:    2000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "start time negative",
			req: &PostableDaemonSets{
				Start:  -1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "end time zero",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    0,
				Limit:  100,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "start time greater than end time",
			req: &PostableDaemonSets{
				Start:  2000,
				End:    1000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "start time equal to end time",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    1000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "limit zero",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  0,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "limit negative",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  -10,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "limit exceeds max",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  5001,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "offset negative",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: -5,
			},
			wantErr: true,
		},
		{
			name: "orderBy nil is valid",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: false,
		},
		{
			name: "orderBy with valid key cpu and direction asc",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: DaemonSetsOrderByCPU,
						},
					},
					Direction: qbtypes.OrderDirectionAsc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with valid key memory_limit and direction desc",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: DaemonSetsOrderByMemoryLimit,
						},
					},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with valid key desired_nodes and direction desc",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: DaemonSetsOrderByDesiredNodes,
						},
					},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with valid key current_nodes and direction asc",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: DaemonSetsOrderByCurrentNodes,
						},
					},
					Direction: qbtypes.OrderDirectionAsc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with restarts key is rejected",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: "restarts",
						},
					},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: true,
		},
		{
			name: "orderBy with invalid key",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: "unknown",
						},
					},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: true,
		},
		{
			name: "orderBy with valid key but invalid direction",
			req: &PostableDaemonSets{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: DaemonSetsOrderByCPU,
						},
					},
					Direction: qbtypes.OrderDirection{String: valuer.NewString("invalid")},
				},
			},
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.req.Validate()
			if tt.wantErr {
				require.Error(t, err)
				require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
			} else {
				require.NoError(t, err)
			}
		})
	}
}
29
tests/fixtures/querier.py
vendored
@@ -450,6 +450,35 @@ def build_scalar_query(
    return {"type": "builder_query", "spec": spec}


def build_raw_query(
    name: str,
    signal: str,
    *,
    order: list[dict] | None = None,
    limit: int | None = None,
    filter_expression: str | None = None,
    step_interval: int = DEFAULT_STEP_INTERVAL,
    disabled: bool = False,
) -> dict:
    spec: dict[str, Any] = {
        "name": name,
        "signal": signal,
        "stepInterval": step_interval,
        "disabled": disabled,
    }

    if order:
        spec["order"] = order

    if limit is not None:
        spec["limit"] = limit

    if filter_expression:
        spec["filter"] = {"expression": filter_expression}

    return {"type": "builder_query", "spec": spec}


def build_group_by_field(
    name: str,
    field_data_type: str = "string",

@@ -11,6 +11,7 @@ from fixtures.logs import Logs
from fixtures.querier import (
    build_logs_aggregation,
    build_order_by,
    build_raw_query,
    build_scalar_query,
    get_column_data_from_response,
    get_rows,
@@ -27,28 +28,33 @@ def _run_query_case(signoz: types.SigNoz, token: str, now: datetime, case: dict[
    start_ms = case.get("startMs", int((now - timedelta(seconds=10)).timestamp() * 1000))
    end_ms = case.get("endMs", int(now.timestamp() * 1000))

    aggregation = case.get("aggregation")
    if aggregation and not isinstance(aggregation, list):
        aggregations = [build_logs_aggregation(aggregation)]
    elif aggregation:
        aggregations = aggregation
    else:
        aggregations = []

    order = case.get("order")
    if order is None and case["requestType"] == "raw":
        order = [build_order_by("timestamp", "desc")]

    query = build_scalar_query(
        name=case["name"],
        signal="logs",
        aggregations=aggregations,
        group_by=case.get("groupBy"),
        order=order,
        limit=case.get("limit", 100),
        filter_expression=case.get("expression"),
        step_interval=case.get("stepInterval") or 60,
    )
    if case["requestType"] == "raw":
        query = build_raw_query(
            name=case["name"],
            signal="logs",
            filter_expression=case.get("expression"),
            order=case.get("order") or [build_order_by("timestamp", "desc")],
            limit=case.get("limit", 100),
            step_interval=case.get("stepInterval") or 60,
        )
    else:
        aggregation = case.get("aggregation")
        if aggregation and not isinstance(aggregation, list):
            aggregations = [build_logs_aggregation(aggregation)]
        elif aggregation:
            aggregations = aggregation
        else:
            aggregations = []
        query = build_scalar_query(
            name=case["name"],
            signal="logs",
            aggregations=aggregations,
            group_by=case.get("groupBy"),
            order=case.get("order"),
            limit=case.get("limit", 100),
            filter_expression=case.get("expression"),
            step_interval=case.get("stepInterval") or 60,
        )

    response = make_query_request(
        signoz=signoz,
@@ -636,10 +642,9 @@ def test_select_order_by(
    end_ms = int(now.timestamp() * 1000)

    def _run(case: dict[str, Any]) -> None:
        query = build_scalar_query(
        query = build_raw_query(
            name=case["name"],
            signal="logs",
            aggregations=[build_logs_aggregation("count()")],
            order=case["order"],
            limit=100,
            step_interval=60,

285
tests/integration/tests/querier_json_body/02_non_body_fields.py
Normal file
@@ -0,0 +1,285 @@
import json
from collections.abc import Callable
from datetime import UTC, datetime, timedelta
from typing import Any

import requests

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.logs import Logs
from fixtures.querier import (
    build_group_by_field,
    build_logs_aggregation,
    build_order_by,
    build_raw_query,
    build_scalar_query,
    get_rows,
    get_scalar_table_data,
    make_query_request,
)


def _raw(
    signoz: types.SigNoz,
    token: str,
    start_ms: int,
    end_ms: int,
    name: str,
    *,
    expression: str | None = None,
    order: list[dict] | None = None,
    limit: int = 100,
) -> requests.Response:
    q = build_raw_query(
        name=name,
        signal="logs",
        filter_expression=expression,
        order=order or [build_order_by("timestamp", "desc")],
        limit=limit,
        step_interval=60,
    )
    r = make_query_request(signoz, token, start_ms, end_ms, queries=[q], request_type="raw")
    assert r.status_code == 200, f"HTTP {r.status_code} for '{name}': {r.text}"
    return r


def _scalar(
    signoz: types.SigNoz,
    token: str,
    start_ms: int,
    end_ms: int,
    name: str,
    aggregation: str,
    *,
    expression: str | None = None,
    group_by: list[dict] | None = None,
) -> requests.Response:
    q = build_scalar_query(
        name=name,
        signal="logs",
        aggregations=[build_logs_aggregation(aggregation)],
        filter_expression=expression,
        group_by=group_by,
        step_interval=60,
    )
    r = make_query_request(signoz, token, start_ms, end_ms, queries=[q], request_type="scalar")
    assert r.status_code == 200, f"HTTP {r.status_code} for '{name}': {r.text}"
    return r


def _body_users(response: requests.Response) -> set[str | None]:
    return {json.loads(row["data"]["body"]).get("user") for row in get_rows(response)}


def _body_scores(response: requests.Response) -> list[int | None]:
    return [json.loads(row["data"]["body"]).get("score") for row in get_rows(response)]


def _services(response: requests.Response) -> list[str]:
    return [row["data"]["resources_string"].get("service.name", "") for row in get_rows(response)]


def _counts(response: requests.Response) -> dict[str, Any]:
    return {str(row[0]): row[-1] for row in get_scalar_table_data(response.json()) if row}


def _run_case(
    signoz: types.SigNoz,
    token: str,
    start_ms: int,
    end_ms: int,
    case: dict[str, Any],
) -> None:
    if case["requestType"] == "raw":
        response = _raw(signoz, token, start_ms, end_ms, case["name"], expression=case.get("expression"), order=case.get("order"))
    else:
        response = _scalar(signoz, token, start_ms, end_ms, case["name"], case["aggregation"], expression=case.get("expression"), group_by=case.get("groupBy"))
    assert case["validate"](response), f"Validation failed for '{case['name']}': {response.json()}"


# ============================================================================
# Filter · GroupBy · Aggregation — non-body fields across all three contexts
#
# Five cases, one dataset. Each case crosses a different combination of
# resource attr / log attr / top-level field in WHERE, GROUP BY, and agg:
#
#   case 1  filter       resource + log attr + top-level in WHERE (raw)
#   case 2  group by     resource × top-level multi-key (scalar)
#   case 3  aggregation  count_distinct(log attr) grouped by top-level (scalar)
#   case 4  agg+filter   count by resource, body-field WHERE guard (scalar)
#   case 5  agg+filter   count_distinct(resource) by log attr, top-level filter (scalar)
#
# Data landscape (5 logs):
#   log1 — auth-svc, GET,    INFO,  score=80,  user=alice
#   log2 — auth-svc, POST,   ERROR, score=90,  user=bob
#   log3 — auth-svc, GET,    INFO,  score=60,  user=carol
#   log4 — api-gw,   GET,    WARN,  score=70,  user=diana
#   log5 — worker,   DELETE, ERROR, score=100, user=eve
# ============================================================================


def test_non_body_filter_groupby_aggregation(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_logs: Callable[[list[Logs]], None],
    export_json_types: Callable[[list[Logs]], None],
) -> None:
    now = datetime.now(tz=UTC)
    start_ms = int((now - timedelta(seconds=10)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)

    log_data = [
        ("auth-svc", "GET", "INFO", {"score": 80, "user": "alice"}),
        ("auth-svc", "POST", "ERROR", {"score": 90, "user": "bob"}),
        ("auth-svc", "GET", "INFO", {"score": 60, "user": "carol"}),
        ("api-gw", "GET", "WARN", {"score": 70, "user": "diana"}),
        ("worker", "DELETE", "ERROR", {"score": 100, "user": "eve"}),
    ]
    logs_list = [
        Logs(
            timestamp=now - timedelta(seconds=len(log_data) - i),
            resources={"service.name": svc},
            attributes={"http.method": method},
            body_v2=json.dumps(body),
            body_promoted="",
            severity_text=sev,
        )
        for i, (svc, method, sev, body) in enumerate(log_data)
    ]
    export_json_types(logs_list)
    insert_logs(logs_list)
    token = get_token(email=USER_ADMIN_EMAIL, password=USER_ADMIN_PASSWORD)

    cases = [
        # 1. Filter — resource + log attr + top-level in WHERE (all three non-body contexts at once)
        {
            "name": "filter.cross_context",
            "requestType": "raw",
            "expression": 'service.name = "auth-svc" AND http.method = "GET" AND severity_text = "INFO"',
            "validate": lambda r: len(get_rows(r)) == 2 and _body_users(r) == {"alice", "carol"},
        },
        # 2. GroupBy — resource × top-level multi-key, no filter
        # Proves both contexts resolve correctly as simultaneous GROUP BY keys.
        {
            "name": "groupby.resource_x_toplevel",
            "requestType": "scalar",
            "expression": None,
            "groupBy": [build_group_by_field("service.name"), {"name": "severity_text"}],
            "aggregation": "count()",
            # auth-svc+INFO=2, auth-svc+ERROR=1, api-gw+WARN=1, worker+ERROR=1
            "validate": lambda r: (p := {(str(row[0]), str(row[1])): row[-1] for row in get_scalar_table_data(r.json()) if len(row) >= 3}) and p.get(("auth-svc", "INFO")) == 2 and p.get(("auth-svc", "ERROR")) == 1 and p.get(("api-gw", "WARN")) == 1 and p.get(("worker", "ERROR")) == 1,
        },
        # 3. Aggregation — count_distinct(log attr) grouped by top-level
        # ERROR logs use {POST, DELETE} → 2 distinct methods; INFO/WARN use only GET → 1.
        {
            "name": "agg.count_distinct_attr_by_toplevel",
            "requestType": "scalar",
            "expression": None,
            "groupBy": [{"name": "severity_text"}],
            "aggregation": "count_distinct(http.method)",
            "validate": lambda r: (rows := _counts(r)) and int(rows["INFO"]) == 1 and int(rows["ERROR"]) == 2 and int(rows["WARN"]) == 1,
        },
        # 4. Aggregation + body filter — count by resource WHERE body score >= 80
        # Body field gates the logs; non-body field drives the GROUP BY.
        {
            "name": "agg.count_by_resource_body_filter",
            "requestType": "scalar",
            "expression": "score >= 80",
            "groupBy": [build_group_by_field("service.name")],
            "aggregation": "count()",
            # score>=80: alice(80), bob(90), eve(100) → auth-svc: 2, worker: 1; api-gw excluded
            "validate": lambda r: (rows := _counts(r)) and int(rows["auth-svc"]) == 2 and int(rows["worker"]) == 1 and "api-gw" not in rows,
        },
        # 5. Aggregation + top-level filter — count_distinct(resource) grouped by log attr
        # Aggregates a resource attr, groups by a log attr, filtered by a top-level field.
        {
            "name": "agg.count_distinct_resource_by_attr_toplevel_filter",
            "requestType": "scalar",
            "expression": "severity_text IN ['INFO', 'WARN']",
            "groupBy": [{"name": "http.method"}],
            "aggregation": "count_distinct(service.name)",
            # INFO/WARN logs: GET(auth-svc×2, api-gw) → 2 distinct svcs; POST/DELETE excluded
            "validate": lambda r: (rows := _counts(r)) and int(rows["GET"]) == 2 and "POST" not in rows and "DELETE" not in rows,
        },
    ]

    for case in cases:
        case.setdefault("groupBy", None)
        _run_case(signoz, token, start_ms, end_ms, case)


# ============================================================================
# OrderBy — non-body fields as primary sort keys
#
# Four cases cover every non-body context as the primary ORDER BY key:
#   orderby.service_asc              resource attr (service.name ASC)
#   orderby.timestamp_desc           top-level (timestamp DESC)
#   orderby.severity_asc             top-level (severity_text ASC)
#   orderby.multi_method_then_score  log attr primary, body path secondary
#
# Data landscape:
#   log1 — svc-a, GET,    INFO, score=80, ts=now-4s
#   log2 — svc-a, POST,   INFO, score=90, ts=now-3s
#   log3 — svc-b, GET,    WARN, score=60, ts=now-2s
#   log4 — svc-b, DELETE, WARN, score=70, ts=now-1s
# ============================================================================


def test_non_body_orderby(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_logs: Callable[[list[Logs]], None],
    export_json_types: Callable[[list[Logs]], None],
) -> None:
    now = datetime.now(tz=UTC)
    start_ms = int((now - timedelta(seconds=10)).timestamp() * 1000)
    end_ms = int(now.timestamp() * 1000)

    logs_list = [
        Logs(timestamp=now - timedelta(seconds=4), resources={"service.name": "svc-a"}, attributes={"http.method": "GET"}, body_v2=json.dumps({"score": 80}), body_promoted="", severity_text="INFO"),
        Logs(timestamp=now - timedelta(seconds=3), resources={"service.name": "svc-a"}, attributes={"http.method": "POST"}, body_v2=json.dumps({"score": 90}), body_promoted="", severity_text="INFO"),
        Logs(timestamp=now - timedelta(seconds=2), resources={"service.name": "svc-b"}, attributes={"http.method": "GET"}, body_v2=json.dumps({"score": 60}), body_promoted="", severity_text="WARN"),
        Logs(timestamp=now - timedelta(seconds=1), resources={"service.name": "svc-b"}, attributes={"http.method": "DELETE"}, body_v2=json.dumps({"score": 70}), body_promoted="", severity_text="WARN"),
    ]
    export_json_types(logs_list)
    insert_logs(logs_list)
    token = get_token(email=USER_ADMIN_EMAIL, password=USER_ADMIN_PASSWORD)

    cases = [
        # resource attr ASC: svc-a×2 before svc-b×2
        {
            "name": "orderby.service_asc",
            "requestType": "raw",
            "order": [build_order_by("service.name", "asc")],
            "validate": lambda r: len(get_rows(r)) == 4 and _services(r)[:2] == ["svc-a", "svc-a"] and _services(r)[2:] == ["svc-b", "svc-b"],
        },
        # top-level timestamp DESC: ts-1s(svc-b/70), ts-2s(svc-b/60), ts-3s(svc-a/90), ts-4s(svc-a/80)
        {
            "name": "orderby.timestamp_desc",
            "requestType": "raw",
            "order": [build_order_by("timestamp", "desc")],
            "validate": lambda r: len(get_rows(r)) == 4 and _body_scores(r) == [70, 60, 90, 80] and _services(r) == ["svc-b", "svc-b", "svc-a", "svc-a"],
        },
        # top-level severity_text ASC: INFO(svc-a×2) before WARN(svc-b×2)
        {
            "name": "orderby.severity_asc",
            "requestType": "raw",
            "order": [build_order_by("severity_text", "asc")],
            "validate": lambda r: len(get_rows(r)) == 4 and _services(r)[:2] == ["svc-a", "svc-a"] and _services(r)[2:] == ["svc-b", "svc-b"],
        },
        # multi-key: http.method ASC then score ASC — DELETE(70), GET(60,80), POST(90)
        {
            "name": "orderby.multi_method_then_score",
            "requestType": "raw",
            "order": [build_order_by("http.method", "asc"), build_order_by("score", "asc")],
            # DELETE < GET < POST alphabetically; within GET scores go 60→80
            "validate": lambda r: len(get_rows(r)) == 4 and _body_scores(r) == [70, 60, 80, 90],
        },
    ]

    for case in cases:
        case.setdefault("groupBy", None)
        _run_case(signoz, token, start_ms, end_ms, case)