Mirror of https://github.com/SigNoz/signoz.git (synced 2026-04-29 15:10:28 +01:00)

Compare commits: nv/v2-dash...infraM/v2_ (133 commits)
Commits (SHA1):

a55e65ae43, 7f14bc0e0a, 1c9e31ad00, 8b0e8f666e, 1d89b03f10, 3a61a78986, 46e833faba, 4bd7492629, 401701e036, 3bec0df0ad, 24fe9a986d, 520e92049c, 92d297ac9d, eff29aefba, ca73453c9e, 1d836d674f, 83724b0cde, 3050e37ec7, bdbaa32485, 55b2215025, e90378e618, 4592b78f48, d81c99feae, 65a456ff9e, 264577b673, b35c6676f9, c78c9a42db, 1095caa123, 9043b49762, d4084a7494, 27c564b3bf, f02c491828, 3d53b8f77f, dffe94fec4, b7d4f18aae, 9ad2ec428a, c9360fcf13, b5ab45db20, 08f76aca78, 983d4fe4f2, 833af794c3, 21b51d1fcc, 56f22682c8, 9c8359940c, 4050880275, 5e775f64f2, 0189f23f46, 49a36d4e3d, 9407d658ab, 5035712485, bab17c3615, 37b44f4db9, 99dd6e5f1e, 9c7131fa6a, ad889a2e1d, a4f6d0cbf5, 589bed7c16, 93843a1f48, 88c43108fc, ed4cf540e8, 9e2dfa9033, d98d5d68ee, 2cb1c3b73b, ae7ca497ad, a579916961, 4a16d56abf, 642b5ac3f0, a12112619c, 014785f1bc, 58ee797b10, 82d236742f, 397e1ad5be, 8d6b25ca9b, 5fa6bd8b8d, bd9977483b, 50fbdfeeef, e2b1b73e87, cb9f3fd3e5, 232acc343d, 2025afdccc, d2f4d4af93, 47ff7bbb8e, 724071c5dc, 4d24979358, 042943b10a, 48a9be7ec8, a9504b2120, 8755887c4a, 4cb4662b3a, e6900dabc8, c1ba389b63, 3a1f40234f, 2e4891fa63, 04ebc0bec7, 271f9b81ed, 6fa815c294, 63ec518efb, c4ca20dd90, e56cc4222b, 07d2944d7c, dea01ae36a, 62ea5b54e2, e549a7e42f, 90e2ebb11f, 61baa1be7a, b946fa665f, 2e049556e4, 492a5e70d7, ba1f2771e8, 7458fb4855, 5f55f3938b, 3e8102485c, 861c682ea5, c8e5895dff, 82d72e7edb, a3f8ecaaf1, 19aada656c, b21bb4280f, bc0a4fdb5c, 37fb0e9254, aecfa1a174, b869d23d94, 6ee3d44f76, 462e554107, 66afa73e6f, 54c604bcf4, c1be02ba54, d3c7ba8f45, 039c4a0496, 51a94b6bbc, bbfbb94f52, d1eb9ef16f, 3db00f8bc3
@@ -27,7 +27,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
-	"github.com/SigNoz/signoz/pkg/modules/tag"
 	"github.com/SigNoz/signoz/pkg/modules/organization"
 	"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
 	"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
@@ -101,8 +100,8 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
 			return openfgaauthz.NewProviderFactory(sqlstore, openfgaschema.NewSchema().Get(ctx), openfgaDataStore), nil
 		},
-		func(store sqlstore.SQLStore, settings factory.ProviderSettings, analytics analytics.Analytics, orgGetter organization.Getter, queryParser queryparser.QueryParser, _ querier.Querier, _ licensing.Licensing, tagModule tag.Module) dashboard.Module {
-			return impldashboard.NewModule(impldashboard.NewStore(store), store, settings, analytics, orgGetter, queryParser, tagModule)
+		func(store sqlstore.SQLStore, settings factory.ProviderSettings, analytics analytics.Analytics, orgGetter organization.Getter, queryParser queryparser.QueryParser, _ querier.Querier, _ licensing.Licensing) dashboard.Module {
+			return impldashboard.NewModule(impldashboard.NewStore(store), settings, analytics, orgGetter, queryParser)
 		},
 		func(_ licensing.Licensing) factory.ProviderFactory[gateway.Gateway, gateway.Config] {
 			return noopgateway.NewProviderFactory()
@@ -42,7 +42,6 @@ import (
 	pkgcloudintegration "github.com/SigNoz/signoz/pkg/modules/cloudintegration/implcloudintegration"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard"
 	pkgimpldashboard "github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
-	"github.com/SigNoz/signoz/pkg/modules/tag"
 	"github.com/SigNoz/signoz/pkg/modules/organization"
 	"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
 	"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
@@ -145,8 +144,8 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
 			}
 			return openfgaauthz.NewProviderFactory(sqlstore, openfgaschema.NewSchema().Get(ctx), openfgaDataStore, licensing, onBeforeRoleDelete, dashboardModule), nil
 		},
-		func(store sqlstore.SQLStore, settings factory.ProviderSettings, analytics analytics.Analytics, orgGetter organization.Getter, queryParser queryparser.QueryParser, querier querier.Querier, licensing licensing.Licensing, tagModule tag.Module) dashboard.Module {
-			return impldashboard.NewModule(pkgimpldashboard.NewStore(store), store, settings, analytics, orgGetter, queryParser, querier, licensing, tagModule)
+		func(store sqlstore.SQLStore, settings factory.ProviderSettings, analytics analytics.Analytics, orgGetter organization.Getter, queryParser queryparser.QueryParser, querier querier.Querier, licensing licensing.Licensing) dashboard.Module {
+			return impldashboard.NewModule(pkgimpldashboard.NewStore(store), settings, analytics, orgGetter, queryParser, querier, licensing)
 		},
 		func(licensing licensing.Licensing) factory.ProviderFactory[gateway.Gateway, gateway.Config] {
 			return httpgateway.NewProviderFactory(licensing)
docs/api/openapi.yml: 1265 lines changed (file diff suppressed because it is too large)
@@ -11,15 +11,12 @@ import (
 	"github.com/SigNoz/signoz/pkg/modules/dashboard"
 	pkgimpldashboard "github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
 	"github.com/SigNoz/signoz/pkg/modules/organization"
-	"github.com/SigNoz/signoz/pkg/modules/tag"
 	"github.com/SigNoz/signoz/pkg/querier"
 	"github.com/SigNoz/signoz/pkg/queryparser"
-	"github.com/SigNoz/signoz/pkg/sqlstore"
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/types/ctxtypes"
 	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
-	"github.com/SigNoz/signoz/pkg/types/dashboardtypes/dashboardtypesv2"
 	"github.com/SigNoz/signoz/pkg/types/instrumentationtypes"
 	"github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
 	"github.com/SigNoz/signoz/pkg/valuer"
@@ -33,9 +30,9 @@ type module struct {
 	licensing licensing.Licensing
 }

-func NewModule(store dashboardtypes.Store, sqlstore sqlstore.SQLStore, settings factory.ProviderSettings, analytics analytics.Analytics, orgGetter organization.Getter, queryParser queryparser.QueryParser, querier querier.Querier, licensing licensing.Licensing, tagModule tag.Module) dashboard.Module {
+func NewModule(store dashboardtypes.Store, settings factory.ProviderSettings, analytics analytics.Analytics, orgGetter organization.Getter, queryParser queryparser.QueryParser, querier querier.Querier, licensing licensing.Licensing) dashboard.Module {
 	scopedProviderSettings := factory.NewScopedProviderSettings(settings, "github.com/SigNoz/signoz/ee/modules/dashboard/impldashboard")
-	pkgDashboardModule := pkgimpldashboard.NewModule(store, sqlstore, settings, analytics, orgGetter, queryParser, tagModule)
+	pkgDashboardModule := pkgimpldashboard.NewModule(store, settings, analytics, orgGetter, queryParser)

 	return &module{
 		pkgDashboardModule: pkgDashboardModule,
@@ -200,18 +197,6 @@ func (module *module) Create(ctx context.Context, orgID valuer.UUID, createdBy s
 	return module.pkgDashboardModule.Create(ctx, orgID, createdBy, creator, data)
 }

-func (module *module) CreateV2(ctx context.Context, orgID valuer.UUID, createdBy string, creator valuer.UUID, postable dashboardtypesv2.PostableDashboard) (*dashboardtypesv2.Dashboard, error) {
-	return module.pkgDashboardModule.CreateV2(ctx, orgID, createdBy, creator, postable)
-}
-
-func (module *module) GetV2(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*dashboardtypesv2.Dashboard, error) {
-	return module.pkgDashboardModule.GetV2(ctx, orgID, id)
-}
-
-func (module *module) UpdateV2(ctx context.Context, orgID valuer.UUID, id valuer.UUID, updatedBy string, updateable dashboardtypesv2.UpdateableDashboard) (*dashboardtypesv2.Dashboard, error) {
-	return module.pkgDashboardModule.UpdateV2(ctx, orgID, id, updatedBy, updateable)
-}
-
 func (module *module) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*dashboardtypes.Dashboard, error) {
 	return module.pkgDashboardModule.Get(ctx, orgID, id)
 }
@@ -18,12 +18,10 @@ import type {
 } from 'react-query';

 import type {
-	CreateDashboardV2201,
 	CreatePublicDashboard201,
 	CreatePublicDashboardPathParameters,
 	DashboardtypesPostablePublicDashboardDTO,
 	DashboardtypesUpdatablePublicDashboardDTO,
-	Dashboardtypesv2PostableDashboardDTO,
 	DeletePublicDashboardPathParameters,
 	GetPublicDashboard200,
 	GetPublicDashboardData200,
@@ -636,88 +634,3 @@ export const invalidateGetPublicDashboardWidgetQueryRange = async (

 	return queryClient;
 };
-
-/**
- * This endpoint creates a v2-shape dashboard with structured metadata, a typed data tree, and resolved tags.
- * @summary Create dashboard (v2)
- */
-export const createDashboardV2 = (
-	dashboardtypesv2PostableDashboardDTO: BodyType<Dashboardtypesv2PostableDashboardDTO>,
-	signal?: AbortSignal,
-) => {
-	return GeneratedAPIInstance<CreateDashboardV2201>({
-		url: `/api/v2/dashboards`,
-		method: 'POST',
-		headers: { 'Content-Type': 'application/json' },
-		data: dashboardtypesv2PostableDashboardDTO,
-		signal,
-	});
-};
-
-export const getCreateDashboardV2MutationOptions = <
-	TError = ErrorType<RenderErrorResponseDTO>,
-	TContext = unknown,
->(options?: {
-	mutation?: UseMutationOptions<
-		Awaited<ReturnType<typeof createDashboardV2>>,
-		TError,
-		{ data: BodyType<Dashboardtypesv2PostableDashboardDTO> },
-		TContext
-	>;
-}): UseMutationOptions<
-	Awaited<ReturnType<typeof createDashboardV2>>,
-	TError,
-	{ data: BodyType<Dashboardtypesv2PostableDashboardDTO> },
-	TContext
-> => {
-	const mutationKey = ['createDashboardV2'];
-	const { mutation: mutationOptions } = options
-		? options.mutation &&
-		  'mutationKey' in options.mutation &&
-		  options.mutation.mutationKey
-			? options
-			: { ...options, mutation: { ...options.mutation, mutationKey } }
-		: { mutation: { mutationKey } };
-
-	const mutationFn: MutationFunction<
-		Awaited<ReturnType<typeof createDashboardV2>>,
-		{ data: BodyType<Dashboardtypesv2PostableDashboardDTO> }
-	> = (props) => {
-		const { data } = props ?? {};
-
-		return createDashboardV2(data);
-	};
-
-	return { mutationFn, ...mutationOptions };
-};
-
-export type CreateDashboardV2MutationResult = NonNullable<
-	Awaited<ReturnType<typeof createDashboardV2>>
->;
-export type CreateDashboardV2MutationBody =
-	BodyType<Dashboardtypesv2PostableDashboardDTO>;
-export type CreateDashboardV2MutationError = ErrorType<RenderErrorResponseDTO>;
-
-/**
- * @summary Create dashboard (v2)
- */
-export const useCreateDashboardV2 = <
-	TError = ErrorType<RenderErrorResponseDTO>,
-	TContext = unknown,
->(options?: {
-	mutation?: UseMutationOptions<
-		Awaited<ReturnType<typeof createDashboardV2>>,
-		TError,
-		{ data: BodyType<Dashboardtypesv2PostableDashboardDTO> },
-		TContext
-	>;
-}): UseMutationResult<
-	Awaited<ReturnType<typeof createDashboardV2>>,
-	TError,
-	{ data: BodyType<Dashboardtypesv2PostableDashboardDTO> },
-	TContext
-> => {
-	const mutationOptions = getCreateDashboardV2MutationOptions(options);
-
-	return useMutation(mutationOptions);
-};
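For reviewers tracing this removal: `useCreateDashboardV2` followed the usual generated react-query mutation shape, so a caller on the base branch would have looked roughly like the sketch below. The import paths and the payload variable are assumptions; only the `{ data }` calling convention and the mutationKey-merging behavior are taken from the generated code above.

```ts
// Hypothetical caller of the (now removed) generated hook. Import paths are
// assumptions; the DTO's concrete fields are not visible in this diff.
import { useCreateDashboardV2 } from './publicDashboard';
import type { Dashboardtypesv2PostableDashboardDTO } from '../sigNoz.schemas';

export function useCreateDashboardAction(
	postable: Dashboardtypesv2PostableDashboardDTO,
): { create: () => void; isLoading: boolean } {
	const { mutate, isLoading } = useCreateDashboardV2({
		mutation: {
			// A caller-supplied mutationKey wins; the generated options helper
			// only injects the default ['createDashboardV2'] key when none is set.
			mutationKey: ['createDashboardV2', 'example'],
			onSuccess: (created): void => console.log('dashboard created', created),
		},
	});

	// mutate takes { data } because the generated MutationFunction
	// destructures its props as { data: BodyType<...> }.
	return { create: (): void => mutate({ data: postable }), isLoading };
}
```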
@@ -13,8 +13,12 @@ import type {

 import type {
 	InframonitoringtypesPostableHostsDTO,
+	InframonitoringtypesPostableNamespacesDTO,
+	InframonitoringtypesPostableNodesDTO,
 	InframonitoringtypesPostablePodsDTO,
 	ListHosts200,
+	ListNamespaces200,
+	ListNodes200,
 	ListPods200,
 	RenderErrorResponseDTO,
 } from '../sigNoz.schemas';
@@ -106,6 +110,174 @@ export const useListHosts = <

 	return useMutation(mutationOptions);
 };
+/**
+ * Returns a paginated list of Kubernetes namespaces with key aggregated pod metrics: CPU usage and memory working set (summed across pods in the group), plus per-group pod counts bucketed by each pod's latest k8s.pod.phase value in the window (pendingPodCount, runningPodCount, succeededPodCount, failedPodCount, unknownPodCount). Each namespace includes metadata attributes (k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.namespace.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / memory, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (namespaceCPU, namespaceMemory) return -1 as a sentinel when no data is available for that field.
+ * @summary List Namespaces for Infra Monitoring
+ */
+export const listNamespaces = (
+	inframonitoringtypesPostableNamespacesDTO: BodyType<InframonitoringtypesPostableNamespacesDTO>,
+	signal?: AbortSignal,
+) => {
+	return GeneratedAPIInstance<ListNamespaces200>({
+		url: `/api/v2/infra_monitoring/namespaces`,
+		method: 'POST',
+		headers: { 'Content-Type': 'application/json' },
+		data: inframonitoringtypesPostableNamespacesDTO,
+		signal,
+	});
+};
+
+export const getListNamespacesMutationOptions = <
+	TError = ErrorType<RenderErrorResponseDTO>,
+	TContext = unknown,
+>(options?: {
+	mutation?: UseMutationOptions<
+		Awaited<ReturnType<typeof listNamespaces>>,
+		TError,
+		{ data: BodyType<InframonitoringtypesPostableNamespacesDTO> },
+		TContext
+	>;
+}): UseMutationOptions<
+	Awaited<ReturnType<typeof listNamespaces>>,
+	TError,
+	{ data: BodyType<InframonitoringtypesPostableNamespacesDTO> },
+	TContext
+> => {
+	const mutationKey = ['listNamespaces'];
+	const { mutation: mutationOptions } = options
+		? options.mutation &&
+		  'mutationKey' in options.mutation &&
+		  options.mutation.mutationKey
+			? options
+			: { ...options, mutation: { ...options.mutation, mutationKey } }
+		: { mutation: { mutationKey } };
+
+	const mutationFn: MutationFunction<
+		Awaited<ReturnType<typeof listNamespaces>>,
+		{ data: BodyType<InframonitoringtypesPostableNamespacesDTO> }
+	> = (props) => {
+		const { data } = props ?? {};
+
+		return listNamespaces(data);
+	};
+
+	return { mutationFn, ...mutationOptions };
+};
+
+export type ListNamespacesMutationResult = NonNullable<
+	Awaited<ReturnType<typeof listNamespaces>>
+>;
+export type ListNamespacesMutationBody =
+	BodyType<InframonitoringtypesPostableNamespacesDTO>;
+export type ListNamespacesMutationError = ErrorType<RenderErrorResponseDTO>;
+
+/**
+ * @summary List Namespaces for Infra Monitoring
+ */
+export const useListNamespaces = <
+	TError = ErrorType<RenderErrorResponseDTO>,
+	TContext = unknown,
+>(options?: {
+	mutation?: UseMutationOptions<
+		Awaited<ReturnType<typeof listNamespaces>>,
+		TError,
+		{ data: BodyType<InframonitoringtypesPostableNamespacesDTO> },
+		TContext
+	>;
+}): UseMutationResult<
+	Awaited<ReturnType<typeof listNamespaces>>,
+	TError,
+	{ data: BodyType<InframonitoringtypesPostableNamespacesDTO> },
+	TContext
+> => {
+	const mutationOptions = getListNamespacesMutationOptions(options);
+
+	return useMutation(mutationOptions);
+};
+/**
+ * Returns a paginated list of Kubernetes nodes with key metrics: CPU usage, CPU allocatable, memory working set, memory allocatable, and per-group readyNodesCount / notReadyNodesCount derived from each node's latest k8s.node.condition_ready value in the window. Each node includes metadata attributes (k8s.node.uid, k8s.cluster.name). The response type is 'list' for the default k8s.node.name grouping (each row is one node with its current condition string: ready / not_ready / '') or 'grouped_list' for custom groupBy keys (each row aggregates nodes in the group with readyNodesCount and notReadyNodesCount; condition stays empty). Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_allocatable / memory / memory_allocatable, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (nodeCPU, nodeCPUAllocatable, nodeMemory, nodeMemoryAllocatable) return -1 as a sentinel when no data is available for that field.
+ * @summary List Nodes for Infra Monitoring
+ */
+export const listNodes = (
+	inframonitoringtypesPostableNodesDTO: BodyType<InframonitoringtypesPostableNodesDTO>,
+	signal?: AbortSignal,
+) => {
+	return GeneratedAPIInstance<ListNodes200>({
+		url: `/api/v2/infra_monitoring/nodes`,
+		method: 'POST',
+		headers: { 'Content-Type': 'application/json' },
+		data: inframonitoringtypesPostableNodesDTO,
+		signal,
+	});
+};
+
+export const getListNodesMutationOptions = <
+	TError = ErrorType<RenderErrorResponseDTO>,
+	TContext = unknown,
+>(options?: {
+	mutation?: UseMutationOptions<
+		Awaited<ReturnType<typeof listNodes>>,
+		TError,
+		{ data: BodyType<InframonitoringtypesPostableNodesDTO> },
+		TContext
+	>;
+}): UseMutationOptions<
+	Awaited<ReturnType<typeof listNodes>>,
+	TError,
+	{ data: BodyType<InframonitoringtypesPostableNodesDTO> },
+	TContext
+> => {
+	const mutationKey = ['listNodes'];
+	const { mutation: mutationOptions } = options
+		? options.mutation &&
+		  'mutationKey' in options.mutation &&
+		  options.mutation.mutationKey
+			? options
+			: { ...options, mutation: { ...options.mutation, mutationKey } }
+		: { mutation: { mutationKey } };
+
+	const mutationFn: MutationFunction<
+		Awaited<ReturnType<typeof listNodes>>,
+		{ data: BodyType<InframonitoringtypesPostableNodesDTO> }
+	> = (props) => {
+		const { data } = props ?? {};
+
+		return listNodes(data);
+	};
+
+	return { mutationFn, ...mutationOptions };
+};
+
+export type ListNodesMutationResult = NonNullable<
+	Awaited<ReturnType<typeof listNodes>>
+>;
+export type ListNodesMutationBody =
+	BodyType<InframonitoringtypesPostableNodesDTO>;
+export type ListNodesMutationError = ErrorType<RenderErrorResponseDTO>;
+
+/**
+ * @summary List Nodes for Infra Monitoring
+ */
+export const useListNodes = <
+	TError = ErrorType<RenderErrorResponseDTO>,
+	TContext = unknown,
+>(options?: {
+	mutation?: UseMutationOptions<
+		Awaited<ReturnType<typeof listNodes>>,
+		TError,
+		{ data: BodyType<InframonitoringtypesPostableNodesDTO> },
+		TContext
+	>;
+}): UseMutationResult<
+	Awaited<ReturnType<typeof listNodes>>,
+	TError,
+	{ data: BodyType<InframonitoringtypesPostableNodesDTO> },
+	TContext
+> => {
+	const mutationOptions = getListNodesMutationOptions(options);
+
+	return useMutation(mutationOptions);
+};
 /**
  * Returns a paginated list of Kubernetes pods with key metrics: CPU usage, CPU request/limit utilization, memory working set, memory request/limit utilization, current pod phase (pending/running/succeeded/failed/unknown), and pod age (ms since start time). Each pod includes metadata attributes (namespace, node, workload owner such as deployment/statefulset/daemonset/job/cronjob, cluster). Supports filtering via a filter expression, custom groupBy to aggregate pods by any attribute, ordering by any of the six metrics (cpu, cpu_request, cpu_limit, memory, memory_request, memory_limit), and pagination via offset/limit. The response type is 'list' for the default k8s.pod.uid grouping (each row is one pod with its current phase) or 'grouped_list' for custom groupBy keys (each row aggregates pods in the group with per-phase counts: pendingPodCount, runningPodCount, succeededPodCount, failedPodCount, unknownPodCount derived from each pod's latest phase in the window). Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (podCPU, podCPURequest, podCPULimit, podMemory, podMemoryRequest, podMemoryLimit, podAge) return -1 as a sentinel when no data is available for that field.
  * @summary List Pods for Infra Monitoring
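A minimal usage sketch for the new `useListNodes` hook. The `{ data }` convention and the default cpu-descending ordering come from code in this diff; the import path and the request-body field names (start/end in epoch ms, filter expression, offset/limit) are inferred from the endpoint description, since the generated schemas file is suppressed here.

```ts
// Usage sketch; import path and DTO field names are assumptions (the
// sigNoz.schemas diff is suppressed above), so verify before relying on them.
import type { InframonitoringtypesPostableNodesDTO } from '../sigNoz.schemas';
import { useListNodes } from './infraMonitoring';

export function useTopNodesByCPU(): { load: () => void; isLoading: boolean } {
	const { mutate: fetchNodes, isLoading } = useListNodes({
		mutation: {
			onSuccess: (page): void => console.log('nodes page', page),
		},
	});

	const load = (): void => {
		const end = Date.now();
		const body = {
			start: end - 60 * 60 * 1000, // last hour, epoch ms (assumed casing)
			end,
			filter: { expression: "k8s.cluster.name = 'prod'" },
			offset: 0,
			limit: 10, // ListNodes defaults to ordering by cpu desc when orderBy is omitted
		};
		fetchNodes({ data: body as InframonitoringtypesPostableNodesDTO });
	};

	return { load, isLoading };
}
```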
File diff suppressed because it is too large
@@ -104,12 +104,7 @@ export const usePanelContextMenu = ({
 			}

 			if (data && data?.record?.queryName) {
-				onClick(data.coord, {
-					...data.record,
-					label: data.label,
-					seriesColor: data.seriesColor,
-					timeRange,
-				});
+				onClick(data.coord, { ...data.record, label: data.label, timeRange });
 			}
 		},
 		[onClick, queryResponse],
@@ -196,7 +196,6 @@ export const getUplotClickData = ({
 	coord: { x: number; y: number };
 	record: { queryName: string; filters: FilterData[] };
 	label: string | React.ReactNode;
-	seriesColor?: string;
 } | null => {
 	if (!queryData?.queryName || !metric) {
 		return null;
@@ -209,8 +208,6 @@ export const getUplotClickData = ({

 	// Generate label from focusedSeries data
 	let label: string | React.ReactNode = '';
-	const seriesColor = focusedSeries?.color;
-
 	if (focusedSeries && focusedSeries.seriesName) {
 		label = (
 			<span style={{ color: focusedSeries.color }}>
@@ -226,7 +223,6 @@
 		},
 		record,
 		label,
-		seriesColor,
 	};
 };

@@ -241,7 +237,6 @@ export const getPieChartClickData = (
 	queryName: string;
 	filters: FilterData[];
 	label: string | React.ReactNode;
-	seriesColor?: string;
 } | null => {
 	const { metric, queryName } = arc.data.record;
 	if (!queryName || !metric) {
@@ -253,7 +248,6 @@
 		queryName,
 		filters: getFiltersFromMetric(metric), // TODO: add where clause query as well.
 		label,
-		seriesColor: arc.data.color,
 	};
 };

@@ -22,7 +22,6 @@ export interface AggregateData {
 		endTime: number;
 	};
 	label?: string | React.ReactNode;
-	seriesColor?: string;
 }

 const useAggregateDrilldown = ({
@@ -228,13 +228,7 @@ const useBaseAggregateOptions = ({
 	return (
 		<ContextMenu.Item
 			key={key}
-			icon={
-				isLoading ? (
-					<LoadingOutlined spin />
-				) : (
-					<span style={{ color: aggregateData?.seriesColor }}>{icon}</span>
-				)
-			}
+			icon={isLoading ? <LoadingOutlined spin /> : icon}
 			onClick={(): void => onClick()}
 			disabled={isLoading}
 		>
@@ -4,7 +4,7 @@
 	gap: 8px;
 	padding: 8px;
 	cursor: pointer;
-	color: var(--muted-foreground);
+	color: var(--foreground);
 	font-family: Inter;
 	font-size: var(--font-size-sm);
 	font-weight: 600;
@@ -20,10 +20,13 @@
 	overflow: hidden;
 	text-overflow: ellipsis;

-	&:hover,
+	&:hover {
+		background-color: var(--l1-background);
+	}
+
 	&:focus {
 		outline: none;
-		background-color: var(--l2-background-hover);
+		background-color: var(--l1-background);
 	}

 	&.disabled {
@@ -44,8 +47,7 @@
 	}

 	&:hover {
-		background-color: var(--danger-background);
-		color: var(--l1-foreground);
+		background-color: var(--bg-cherry-100);
 	}
 }

@@ -72,24 +74,73 @@
 }

 .context-menu-header {
-	padding: 8px 12px;
-	border-bottom: 1px solid var(--l2-border);
+	padding-bottom: 4px;
+	border-bottom: 1px solid var(--l1-border);
 	color: var(--muted-foreground);
 }

 // Target the popover inner specifically for context menu
 .context-menu .ant-popover-inner {
-	padding: 0;
-	border-radius: 6px;
-	max-width: 300px;
+	background: var(--l2-background) !important;
+	border: 1px solid var(--l2-border) !important;
+	padding: 12px 8px !important;
+	// max-height: 254px !important;
+	max-width: 300px !important;
 }

+// Dark mode support
+.darkMode {
+	.context-menu-item {
+		color: var(--muted-foreground);
+
+		&:hover,
+		&:focus {
+			background-color: var(--l2-background);
+		}
+
+		&.danger {
+			color: var(--bg-cherry-400);
+
+			.icon {
+				color: var(--bg-cherry-400);
+			}
+
+			&:hover {
+				background-color: var(--danger-background);
+				color: var(--l1-foreground);
+			}
+		}
+
+		.icon {
+			color: var(--bg-robin-400);
+		}
+	}
+
+	.context-menu-header {
+		border-bottom: 1px solid var(--l1-border);
+		color: var(--muted-foreground);
+	}
+
+	// Set the menu popover background
+	.context-menu .ant-popover-inner {
+		background: var(--l1-background) !important;
+		border: 1px solid var(--border) !important;
+	}
+}
+
+// Context menu backdrop overlay
+.context-menu-backdrop {
+	position: fixed;
+	inset: 0;
+	top: 0;
+	left: 0;
+	width: 100vw;
+	height: 100vh;
+	z-index: 9999;
+	background: transparent;
+	cursor: default;
+
+	// Prevent any pointer events from reaching elements behind
+	pointer-events: auto;
+
+	// Ensure it covers the entire viewport including any scrollable areas
+	position: fixed !important;
+	inset: 0;
+}
@@ -7,64 +7,12 @@ import (
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
-	"github.com/SigNoz/signoz/pkg/types/dashboardtypes/dashboardtypesv2"
 	"github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/gorilla/mux"
 )

 func (provider *provider) addDashboardRoutes(router *mux.Router) error {
-	if err := router.Handle("/api/v2/dashboards", handler.New(provider.authZ.EditAccess(provider.dashboardHandler.CreateV2), handler.OpenAPIDef{
-		ID:                  "CreateDashboardV2",
-		Tags:                []string{"dashboard"},
-		Summary:             "Create dashboard (v2)",
-		Description:         "This endpoint creates a v2-shape dashboard with structured metadata, a typed data tree, and resolved tags.",
-		Request:             new(dashboardtypesv2.PostableDashboard),
-		RequestContentType:  "application/json",
-		Response:            new(dashboardtypesv2.GettableDashboard),
-		ResponseContentType: "application/json",
-		SuccessStatusCode:   http.StatusCreated,
-		ErrorStatusCodes:    []int{},
-		Deprecated:          false,
-		SecuritySchemes:     newSecuritySchemes(types.RoleEditor),
-	})).Methods(http.MethodPost).GetError(); err != nil {
-		return err
-	}
-
-	if err := router.Handle("/api/v2/dashboards/{id}", handler.New(provider.authZ.ViewAccess(provider.dashboardHandler.GetV2), handler.OpenAPIDef{
-		ID:                  "GetDashboardV2",
-		Tags:                []string{"dashboard"},
-		Summary:             "Get dashboard (v2)",
-		Description:         "This endpoint returns a v2-shape dashboard with its tags and public sharing config (if any).",
-		Request:             nil,
-		RequestContentType:  "",
-		Response:            new(dashboardtypesv2.GettableDashboard),
-		ResponseContentType: "application/json",
-		SuccessStatusCode:   http.StatusOK,
-		ErrorStatusCodes:    []int{},
-		Deprecated:          false,
-		SecuritySchemes:     newSecuritySchemes(types.RoleViewer),
-	})).Methods(http.MethodGet).GetError(); err != nil {
-		return err
-	}
-
-	if err := router.Handle("/api/v2/dashboards/{id}", handler.New(provider.authZ.EditAccess(provider.dashboardHandler.UpdateV2), handler.OpenAPIDef{
-		ID:                  "UpdateDashboardV2",
-		Tags:                []string{"dashboard"},
-		Summary:             "Update dashboard (v2)",
-		Description:         "This endpoint updates a v2-shape dashboard's metadata, data, and tag set. Locked dashboards are rejected.",
-		Request:             new(dashboardtypesv2.UpdateableDashboard),
-		RequestContentType:  "application/json",
-		Response:            new(dashboardtypesv2.GettableDashboard),
-		ResponseContentType: "application/json",
-		SuccessStatusCode:   http.StatusOK,
-		ErrorStatusCodes:    []int{},
-		Deprecated:          false,
-		SecuritySchemes:     newSecuritySchemes(types.RoleEditor),
-	})).Methods(http.MethodPut).GetError(); err != nil {
-		return err
-	}
-
 	if err := router.Handle("/api/v1/dashboards/{id}/public", handler.New(provider.authZ.AdminAccess(provider.dashboardHandler.CreatePublic), handler.OpenAPIDef{
 		ID:   "CreatePublicDashboard",
 		Tags: []string{"dashboard"},
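The public-sharing route kept on both sides is the v1 endpoint above. A request sketch for it follows; POST is implied by the Create semantics and the `CreatePublicDashboard201` response type in the generated client, while the host, auth header, and body field names (inferred from `StorablePublicDashboard`: timeRangeEnabled, defaultTimeRange) are assumptions.

```ts
const SIGNOZ_HOST = 'https://signoz.example.com'; // placeholder host

// Requires the Admin role per the route's AdminAccess wrapper. Body field
// names are inferred from StorablePublicDashboard and may differ in the DTO.
async function sharePublicly(dashboardId: string, token: string): Promise<unknown> {
	const res = await fetch(`${SIGNOZ_HOST}/api/v1/dashboards/${dashboardId}/public`, {
		method: 'POST',
		headers: {
			'Content-Type': 'application/json',
			Authorization: `Bearer ${token}`, // auth mechanism assumed
		},
		body: JSON.stringify({ timeRangeEnabled: true, defaultTimeRange: '30m' }),
	});
	if (!res.ok) {
		throw new Error(`share failed: ${res.status}`);
	}
	return res.json(); // 201 with the public dashboard payload on success
}
```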
@@ -48,5 +48,43 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
 		return err
 	}

+	if err := router.Handle("/api/v2/infra_monitoring/nodes", handler.New(
+		provider.authZ.ViewAccess(provider.infraMonitoringHandler.ListNodes),
+		handler.OpenAPIDef{
+			ID:                  "ListNodes",
+			Tags:                []string{"inframonitoring"},
+			Summary:             "List Nodes for Infra Monitoring",
+			Description:         "Returns a paginated list of Kubernetes nodes with key metrics: CPU usage, CPU allocatable, memory working set, memory allocatable, and per-group readyNodesCount / notReadyNodesCount derived from each node's latest k8s.node.condition_ready value in the window. Each node includes metadata attributes (k8s.node.uid, k8s.cluster.name). The response type is 'list' for the default k8s.node.name grouping (each row is one node with its current condition string: ready / not_ready / '') or 'grouped_list' for custom groupBy keys (each row aggregates nodes in the group with readyNodesCount and notReadyNodesCount; condition stays empty). Supports filtering via a filter expression, custom groupBy, ordering by cpu / cpu_allocatable / memory / memory_allocatable, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (nodeCPU, nodeCPUAllocatable, nodeMemory, nodeMemoryAllocatable) return -1 as a sentinel when no data is available for that field.",
+			Request:             new(inframonitoringtypes.PostableNodes),
+			RequestContentType:  "application/json",
+			Response:            new(inframonitoringtypes.Nodes),
+			ResponseContentType: "application/json",
+			SuccessStatusCode:   http.StatusOK,
+			ErrorStatusCodes:    []int{http.StatusBadRequest, http.StatusUnauthorized},
+			Deprecated:          false,
+			SecuritySchemes:     newSecuritySchemes(types.RoleViewer),
+		})).Methods(http.MethodPost).GetError(); err != nil {
+		return err
+	}
+
+	if err := router.Handle("/api/v2/infra_monitoring/namespaces", handler.New(
+		provider.authZ.ViewAccess(provider.infraMonitoringHandler.ListNamespaces),
+		handler.OpenAPIDef{
+			ID:                  "ListNamespaces",
+			Tags:                []string{"inframonitoring"},
+			Summary:             "List Namespaces for Infra Monitoring",
+			Description:         "Returns a paginated list of Kubernetes namespaces with key aggregated pod metrics: CPU usage and memory working set (summed across pods in the group), plus per-group pod counts bucketed by each pod's latest k8s.pod.phase value in the window (pendingPodCount, runningPodCount, succeededPodCount, failedPodCount, unknownPodCount). Each namespace includes metadata attributes (k8s.namespace.name, k8s.cluster.name). The response type is 'list' for the default k8s.namespace.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates pods in the group. Supports filtering via a filter expression, custom groupBy, ordering by cpu / memory, and pagination via offset/limit. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (namespaceCPU, namespaceMemory) return -1 as a sentinel when no data is available for that field.",
+			Request:             new(inframonitoringtypes.PostableNamespaces),
+			RequestContentType:  "application/json",
+			Response:            new(inframonitoringtypes.Namespaces),
+			ResponseContentType: "application/json",
+			SuccessStatusCode:   http.StatusOK,
+			ErrorStatusCodes:    []int{http.StatusBadRequest, http.StatusUnauthorized},
+			Deprecated:          false,
+			SecuritySchemes:     newSecuritySchemes(types.RoleViewer),
+		})).Methods(http.MethodPost).GetError(); err != nil {
+		return err
+	}
+
 	return nil
 }
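Both new routes are plain POST endpoints behind `ViewAccess`, so they can be exercised directly as well. A `fetch` sketch against the namespaces route; the host constant, auth header, and body field casing are assumptions, while the path, method, and 400/401 error codes come from the registration above.

```ts
const SIGNOZ_HOST = 'https://signoz.example.com'; // placeholder host

// Field names are inferred from PostableNamespaces usage in the module code:
// start/end (epoch ms), optional filter/groupBy/orderBy, offset/limit paging.
async function listNamespacesDirect(token: string): Promise<unknown> {
	const res = await fetch(`${SIGNOZ_HOST}/api/v2/infra_monitoring/namespaces`, {
		method: 'POST',
		headers: {
			'Content-Type': 'application/json',
			Authorization: `Bearer ${token}`, // auth mechanism assumed
		},
		body: JSON.stringify({
			start: Date.now() - 30 * 60 * 1000,
			end: Date.now(),
			offset: 0,
			limit: 20,
		}),
	});
	if (!res.ok) {
		// the route declares StatusBadRequest and StatusUnauthorized
		throw new Error(`listNamespaces failed: ${res.status}`);
	}
	return res.json();
}
```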
@@ -9,7 +9,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
-	"github.com/SigNoz/signoz/pkg/types/dashboardtypes/dashboardtypesv2"
 	"github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
 	"github.com/SigNoz/signoz/pkg/valuer"
 )
@@ -53,15 +52,6 @@ type Module interface {
 	statsreporter.StatsCollector

 	authz.RegisterTypeable
-
-	// ════════════════════════════════════════════════════════════════════════
-	// v2 dashboard methods
-	// ════════════════════════════════════════════════════════════════════════
-	CreateV2(ctx context.Context, orgID valuer.UUID, createdBy string, creator valuer.UUID, postable dashboardtypesv2.PostableDashboard) (*dashboardtypesv2.Dashboard, error)
-
-	GetV2(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*dashboardtypesv2.Dashboard, error)
-
-	UpdateV2(ctx context.Context, orgID valuer.UUID, id valuer.UUID, updatedBy string, updateable dashboardtypesv2.UpdateableDashboard) (*dashboardtypesv2.Dashboard, error)
 }

 type Handler interface {
@@ -84,13 +74,4 @@ type Handler interface {
 	LockUnlock(http.ResponseWriter, *http.Request)

 	Delete(http.ResponseWriter, *http.Request)
-
-	// ════════════════════════════════════════════════════════════════════════
-	// v2 dashboard methods
-	// ════════════════════════════════════════════════════════════════════════
-	CreateV2(http.ResponseWriter, *http.Request)
-
-	GetV2(http.ResponseWriter, *http.Request)
-
-	UpdateV2(http.ResponseWriter, *http.Request)
 }
@@ -10,9 +10,7 @@ import (
 	"github.com/SigNoz/signoz/pkg/factory"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard"
 	"github.com/SigNoz/signoz/pkg/modules/organization"
-	"github.com/SigNoz/signoz/pkg/modules/tag"
 	"github.com/SigNoz/signoz/pkg/queryparser"
-	"github.com/SigNoz/signoz/pkg/sqlstore"
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
@@ -22,24 +20,20 @@

 type module struct {
 	store       dashboardtypes.Store
-	sqlstore    sqlstore.SQLStore
 	settings    factory.ScopedProviderSettings
 	analytics   analytics.Analytics
 	orgGetter   organization.Getter
 	queryParser queryparser.QueryParser
-	tagModule   tag.Module
 }

-func NewModule(store dashboardtypes.Store, sqlstore sqlstore.SQLStore, settings factory.ProviderSettings, analytics analytics.Analytics, orgGetter organization.Getter, queryParser queryparser.QueryParser, tagModule tag.Module) dashboard.Module {
+func NewModule(store dashboardtypes.Store, settings factory.ProviderSettings, analytics analytics.Analytics, orgGetter organization.Getter, queryParser queryparser.QueryParser) dashboard.Module {
 	scopedProviderSettings := factory.NewScopedProviderSettings(settings, "github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard")
 	return &module{
 		store:       store,
-		sqlstore:    sqlstore,
 		settings:    scopedProviderSettings,
 		analytics:   analytics,
 		orgGetter:   orgGetter,
 		queryParser: queryParser,
-		tagModule:   tagModule,
 	}
 }
@@ -2,11 +2,9 @@ package impldashboard

 import (
 	"context"
-	"time"

-	"github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/sqlstore"
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/uptrace/bun"
@@ -65,93 +63,6 @@ func (store *store) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID)
 	return storableDashboard, nil
 }

-func (store *store) GetV2(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*dashboardtypes.StorableDashboard, *dashboardtypes.StorablePublicDashboard, error) {
-	type joinedRow struct {
-		bun.BaseModel `bun:"table:dashboard,alias:d"`
-
-		ID        valuer.UUID                          `bun:"id"`
-		OrgID     valuer.UUID                          `bun:"org_id"`
-		Data      dashboardtypes.StorableDashboardData `bun:"data"`
-		Locked    bool                                 `bun:"locked"`
-		CreatedAt time.Time                            `bun:"created_at"`
-		CreatedBy string                               `bun:"created_by"`
-		UpdatedAt time.Time                            `bun:"updated_at"`
-		UpdatedBy string                               `bun:"updated_by"`
-
-		PublicID               *valuer.UUID `bun:"public_id"`
-		PublicCreatedAt        *time.Time   `bun:"public_created_at"`
-		PublicUpdatedAt        *time.Time   `bun:"public_updated_at"`
-		PublicTimeRangeEnabled *bool        `bun:"public_time_range_enabled"`
-		PublicDefaultTimeRange *string      `bun:"public_default_time_range"`
-	}
-
-	row := new(joinedRow)
-	err := store.
-		sqlstore.
-		BunDB().
-		NewSelect().
-		Model(row).
-		ColumnExpr("d.id, d.org_id, d.data, d.locked, d.created_at, d.created_by, d.updated_at, d.updated_by").
-		ColumnExpr("pd.id AS public_id, pd.created_at AS public_created_at, pd.updated_at AS public_updated_at, pd.time_range_enabled AS public_time_range_enabled, pd.default_time_range AS public_default_time_range").
-		Join("LEFT JOIN public_dashboard AS pd ON pd.dashboard_id = d.id").
-		Where("d.id = ?", id).
-		Where("d.org_id = ?", orgID).
-		Where("d.deleted_at IS NULL").
-		Scan(ctx)
-	if err != nil {
-		return nil, nil, store.sqlstore.WrapNotFoundErrf(err, dashboardtypes.ErrCodeDashboardNotFound, "dashboard with id %s doesn't exist", id)
-	}
-
-	storable := &dashboardtypes.StorableDashboard{
-		Identifiable:  types.Identifiable{ID: row.ID},
-		TimeAuditable: types.TimeAuditable{CreatedAt: row.CreatedAt, UpdatedAt: row.UpdatedAt},
-		UserAuditable: types.UserAuditable{CreatedBy: row.CreatedBy, UpdatedBy: row.UpdatedBy},
-		OrgID:         row.OrgID,
-		Data:          row.Data,
-		Locked:        row.Locked,
-	}
-
-	if row.PublicID == nil {
-		return storable, nil, nil
-	}
-	public := &dashboardtypes.StorablePublicDashboard{
-		Identifiable:     types.Identifiable{ID: *row.PublicID},
-		TimeAuditable:    types.TimeAuditable{CreatedAt: *row.PublicCreatedAt, UpdatedAt: *row.PublicUpdatedAt},
-		TimeRangeEnabled: *row.PublicTimeRangeEnabled,
-		DefaultTimeRange: *row.PublicDefaultTimeRange,
-		DashboardID:      row.ID.StringValue(),
-	}
-	return storable, public, nil
-}
-
-func (store *store) UpdateV2(ctx context.Context, orgID valuer.UUID, id valuer.UUID, updatedBy string, data dashboardtypes.StorableDashboardData) error {
-	res, err := store.
-		sqlstore.
-		BunDBCtx(ctx).
-		NewUpdate().
-		Model((*dashboardtypes.StorableDashboard)(nil)).
-		Set("data = ?", data).
-		Set("updated_by = ?", updatedBy).
-		Set("updated_at = ?", time.Now()).
-		Where("id = ?", id).
-		Where("org_id = ?", orgID).
-		Where("deleted_at IS NULL").
-		Exec(ctx)
-	if err != nil {
-		return err
-	}
-	rows, err := res.RowsAffected()
-	if err != nil {
-		return err
-	}
-	// Defends against the race where a soft-delete lands between the caller's
-	// pre-update GetV2 and this update.
-	if rows == 0 {
-		return errors.Newf(errors.TypeNotFound, dashboardtypes.ErrCodeDashboardNotFound, "dashboard with id %s doesn't exist", id)
-	}
-	return nil
-}
-
 func (store *store) GetPublic(ctx context.Context, dashboardID string) (*dashboardtypes.StorablePublicDashboard, error) {
 	storable := new(dashboardtypes.StorablePublicDashboard)
 	err := store.
@@ -1,124 +0,0 @@
-package impldashboard
-
-import (
-	"context"
-	"encoding/json"
-	"net/http"
-	"time"
-
-	"github.com/SigNoz/signoz/pkg/errors"
-	"github.com/SigNoz/signoz/pkg/http/render"
-	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/types/dashboardtypes/dashboardtypesv2"
-	"github.com/SigNoz/signoz/pkg/valuer"
-	"github.com/gorilla/mux"
-)
-
-func (handler *handler) CreateV2(rw http.ResponseWriter, r *http.Request) {
-	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
-	defer cancel()
-
-	claims, err := authtypes.ClaimsFromContext(ctx)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	orgID, err := valuer.NewUUID(claims.OrgID)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	req := dashboardtypesv2.PostableDashboard{}
-	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	dashboard, err := handler.module.CreateV2(ctx, orgID, claims.Email, valuer.MustNewUUID(claims.IdentityID()), req)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	render.Success(rw, http.StatusCreated, dashboardtypesv2.NewGettableDashboardFromDashboard(dashboard))
-}
-
-func (handler *handler) GetV2(rw http.ResponseWriter, r *http.Request) {
-	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
-	defer cancel()
-
-	claims, err := authtypes.ClaimsFromContext(ctx)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	orgID, err := valuer.NewUUID(claims.OrgID)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	id := mux.Vars(r)["id"]
-	if id == "" {
-		render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "id is missing in the path"))
-		return
-	}
-	dashboardID, err := valuer.NewUUID(id)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	dashboard, err := handler.module.GetV2(ctx, orgID, dashboardID)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	render.Success(rw, http.StatusOK, dashboardtypesv2.NewGettableDashboardFromDashboard(dashboard))
-}
-
-func (handler *handler) UpdateV2(rw http.ResponseWriter, r *http.Request) {
-	ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
-	defer cancel()
-
-	claims, err := authtypes.ClaimsFromContext(ctx)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	orgID, err := valuer.NewUUID(claims.OrgID)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	id := mux.Vars(r)["id"]
-	if id == "" {
-		render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "id is missing in the path"))
-		return
-	}
-	dashboardID, err := valuer.NewUUID(id)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	req := dashboardtypesv2.UpdateableDashboard{}
-	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	dashboard, err := handler.module.UpdateV2(ctx, orgID, dashboardID, claims.Email, req)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	render.Success(rw, http.StatusOK, dashboardtypesv2.NewGettableDashboardFromDashboard(dashboard))
-}
@@ -1,112 +0,0 @@
-package impldashboard
-
-import (
-	"context"
-
-	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
-	"github.com/SigNoz/signoz/pkg/types/dashboardtypes/dashboardtypesv2"
-	"github.com/SigNoz/signoz/pkg/valuer"
-)
-
-func (module *module) CreateV2(ctx context.Context, orgID valuer.UUID, createdBy string, creator valuer.UUID, postable dashboardtypesv2.PostableDashboard) (*dashboardtypesv2.Dashboard, error) {
-	if err := postable.Validate(); err != nil {
-		return nil, err
-	}
-
-	// Tag upserts run outside the dashboard transaction by design: a successful
-	// upsert that loses an outer dashboard insert just leaves resolved tag rows
-	// around for the next attempt — preferable to coupling the two.
-	resolvedTags, err := module.tagModule.CreateMany(ctx, orgID, postable.Tags, createdBy)
-	if err != nil {
-		return nil, err
-	}
-
-	dashboard := dashboardtypesv2.NewDashboard(orgID, createdBy, postable, resolvedTags)
-
-	storableDashboard, err := dashboard.ToStorableDashboard()
-	if err != nil {
-		return nil, err
-	}
-
-	tagIDs := make([]valuer.UUID, len(resolvedTags))
-	for i, t := range resolvedTags {
-		tagIDs[i] = t.ID
-	}
-
-	err = module.sqlstore.RunInTxCtx(ctx, nil, func(ctx context.Context) error {
-		if err := module.store.Create(ctx, storableDashboard); err != nil {
-			return err
-		}
-		return module.tagModule.LinkToEntity(ctx, orgID, dashboardtypes.EntityTypeDashboard, dashboard.ID, tagIDs)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	module.analytics.TrackUser(ctx, orgID.String(), creator.String(), "Dashboard Created", dashboardtypes.NewStatsFromStorableDashboards([]*dashboardtypes.StorableDashboard{storableDashboard}))
-	return dashboard, nil
-}
-
-func (module *module) GetV2(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*dashboardtypesv2.Dashboard, error) {
-	storable, public, err := module.store.GetV2(ctx, orgID, id)
-	if err != nil {
-		return nil, err
-	}
-
-	tags, err := module.tagModule.ListForEntity(ctx, id)
-	if err != nil {
-		return nil, err
-	}
-
-	return dashboardtypesv2.NewDashboardFromStorable(storable, public, tags)
-}
-
-func (module *module) UpdateV2(ctx context.Context, orgID valuer.UUID, id valuer.UUID, updatedBy string, updateable dashboardtypesv2.UpdateableDashboard) (*dashboardtypesv2.Dashboard, error) {
-	if err := updateable.Validate(); err != nil {
-		return nil, err
-	}
-
-	existing, err := module.GetV2(ctx, orgID, id)
-	if err != nil {
-		return nil, err
-	}
-	// Safety check before upserting tags. existing.Update also has these checks, but
-	// because existing.Update needs the resolved tags, that method can only be called
-	// after the tags have been resolved.
-	if err := existing.CanUpdate(); err != nil {
-		return nil, err
-	}
-
-	// Tag upserts run outside the update transaction for the same reason as
-	// Create: a successful upsert that loses the outer transaction just leaves
-	// resolved tag rows around for the next attempt.
-	resolvedTags, err := module.tagModule.CreateMany(ctx, orgID, updateable.Tags, updatedBy)
-	if err != nil {
-		return nil, err
-	}
-	tagIDs := make([]valuer.UUID, len(resolvedTags))
-	for i, t := range resolvedTags {
-		tagIDs[i] = t.ID
-	}
-
-	if err := existing.Update(updateable, updatedBy, resolvedTags); err != nil {
-		return nil, err
-	}
-
-	storable, err := existing.ToStorableDashboard()
-	if err != nil {
-		return nil, err
-	}
-
-	err = module.sqlstore.RunInTxCtx(ctx, nil, func(ctx context.Context) error {
-		if err := module.tagModule.SyncLinksForEntity(ctx, orgID, dashboardtypes.EntityTypeDashboard, id, tagIDs); err != nil {
-			return err
-		}
-		return module.store.UpdateV2(ctx, orgID, id, updatedBy, storable.Data)
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	return existing, nil
-}
@@ -69,3 +69,51 @@ func (h *handler) ListPods(rw http.ResponseWriter, req *http.Request) {

 	render.Success(rw, http.StatusOK, result)
 }
+
+func (h *handler) ListNodes(rw http.ResponseWriter, req *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(req.Context())
+	if err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	orgID := valuer.MustNewUUID(claims.OrgID)
+
+	var parsedReq inframonitoringtypes.PostableNodes
+	if err := binding.JSON.BindBody(req.Body, &parsedReq); err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	result, err := h.module.ListNodes(req.Context(), orgID, &parsedReq)
+	if err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	render.Success(rw, http.StatusOK, result)
+}
+
+func (h *handler) ListNamespaces(rw http.ResponseWriter, req *http.Request) {
+	claims, err := authtypes.ClaimsFromContext(req.Context())
+	if err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	orgID := valuer.MustNewUUID(claims.OrgID)
+
+	var parsedReq inframonitoringtypes.PostableNamespaces
+	if err := binding.JSON.BindBody(req.Body, &parsedReq); err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	result, err := h.module.ListNamespaces(req.Context(), orgID, &parsedReq)
+	if err != nil {
+		render.Error(rw, err)
+		return
+	}
+
+	render.Success(rw, http.StatusOK, result)
+}
@@ -23,3 +23,9 @@ type podPhaseCounts struct {
 	Failed  int
 	Unknown int
 }
+
+// nodeConditionCounts holds per-group node counts bucketed by latest condition_ready in window.
+type nodeConditionCounts struct {
+	Ready    int
+	NotReady int
+}
@@ -242,3 +242,175 @@ func (m *module) ListPods(ctx context.Context, orgID valuer.UUID, req *inframoni
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (m *module) ListNodes(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableNodes) (*inframonitoringtypes.Nodes, error) {
|
||||
if err := req.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp := &inframonitoringtypes.Nodes{}
|
||||
|
||||
if req.OrderBy == nil {
|
||||
req.OrderBy = &qbtypes.OrderBy{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: inframonitoringtypes.NodesOrderByCPU,
|
||||
},
|
||||
},
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
}
|
||||
}
|
||||
|
||||
if len(req.GroupBy) == 0 {
|
||||
req.GroupBy = []qbtypes.GroupByKey{nodeNameGroupByKey}
|
||||
resp.Type = inframonitoringtypes.ResponseTypeList
|
||||
} else {
|
||||
resp.Type = inframonitoringtypes.ResponseTypeGroupedList
|
||||
}
|
||||
|
||||
missingMetrics, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, nodesTableMetricNamesList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(missingMetrics) > 0 {
|
||||
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: missingMetrics}
|
||||
resp.Records = []inframonitoringtypes.NodeRecord{}
|
||||
resp.Total = 0
|
||||
return resp, nil
|
||||
}
|
||||
if req.End < int64(minFirstReportedUnixMilli) {
|
||||
resp.EndTimeBeforeRetention = true
|
||||
resp.Records = []inframonitoringtypes.NodeRecord{}
|
||||
resp.Total = 0
|
||||
return resp, nil
|
||||
}
|
||||
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: []string{}}
|
||||
|
||||
metadataMap, err := m.getNodesTableMetadata(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp.Total = len(metadataMap)
|
||||
|
||||
pageGroups, err := m.getTopNodeGroups(ctx, orgID, req, metadataMap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(pageGroups) == 0 {
|
||||
resp.Records = []inframonitoringtypes.NodeRecord{}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
filterExpr := ""
|
||||
if req.Filter != nil {
|
||||
filterExpr = req.Filter.Expression
|
||||
}
|
||||
|
||||
fullQueryReq := buildFullQueryRequest(req.Start, req.End, filterExpr, req.GroupBy, pageGroups, m.newNodesTableListQuery())
|
||||
queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conditionCounts, err := m.getPerGroupNodeConditionCounts(ctx, req, pageGroups)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
isNodeNameInGroupBy := isKeyInGroupByAttrs(req.GroupBy, nodeNameAttrKey)
|
||||
resp.Records = buildNodeRecords(isNodeNameInGroupBy, queryResp, pageGroups, req.GroupBy, metadataMap, conditionCounts)
|
||||
resp.Warning = queryResp.Warning
|
||||
|
||||
return resp, nil
|
||||
}

func (m *module) ListNamespaces(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableNamespaces) (*inframonitoringtypes.Namespaces, error) {
	if err := req.Validate(); err != nil {
		return nil, err
	}

	resp := &inframonitoringtypes.Namespaces{}

	if req.OrderBy == nil {
		req.OrderBy = &qbtypes.OrderBy{
			Key: qbtypes.OrderByKey{
				TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
					Name: inframonitoringtypes.NamespacesOrderByCPU,
				},
			},
			Direction: qbtypes.OrderDirectionDesc,
		}
	}

	if len(req.GroupBy) == 0 {
		req.GroupBy = []qbtypes.GroupByKey{namespaceNameGroupByKey}
		resp.Type = inframonitoringtypes.ResponseTypeList
	} else {
		resp.Type = inframonitoringtypes.ResponseTypeGroupedList
	}

	missingMetrics, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, namespacesTableMetricNamesList)
	if err != nil {
		return nil, err
	}
	if len(missingMetrics) > 0 {
		resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: missingMetrics}
		resp.Records = []inframonitoringtypes.NamespaceRecord{}
		resp.Total = 0
		return resp, nil
	}
	if req.End < int64(minFirstReportedUnixMilli) {
		resp.EndTimeBeforeRetention = true
		resp.Records = []inframonitoringtypes.NamespaceRecord{}
		resp.Total = 0
		return resp, nil
	}
	resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: []string{}}

	metadataMap, err := m.getNamespacesTableMetadata(ctx, req)
	if err != nil {
		return nil, err
	}

	resp.Total = len(metadataMap)

	pageGroups, err := m.getTopNamespaceGroups(ctx, orgID, req, metadataMap)
	if err != nil {
		return nil, err
	}

	if len(pageGroups) == 0 {
		resp.Records = []inframonitoringtypes.NamespaceRecord{}
		return resp, nil
	}

	filterExpr := ""
	if req.Filter != nil {
		filterExpr = req.Filter.Expression
	}

	fullQueryReq := buildFullQueryRequest(req.Start, req.End, filterExpr, req.GroupBy, pageGroups, m.newNamespacesTableListQuery())
	queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
	if err != nil {
		return nil, err
	}

	// Reuse the pods phase-counts CTE function via a temp struct — it reads only
	// Start/End/Filter/GroupBy from PostablePods.
	phaseCounts, err := m.getPerGroupPodPhaseCounts(ctx, &inframonitoringtypes.PostablePods{
		Start:   req.Start,
		End:     req.End,
		Filter:  req.Filter,
		GroupBy: req.GroupBy,
	}, pageGroups)
	if err != nil {
		return nil, err
	}

	resp.Records = buildNamespaceRecords(queryResp, pageGroups, req.GroupBy, metadataMap, phaseCounts)
	resp.Warning = queryResp.Warning

	return resp, nil
}

pkg/modules/inframonitoring/implinframonitoring/namespaces.go (new file, 121 lines)
@@ -0,0 +1,121 @@
package implinframonitoring

import (
	"context"
	"slices"

	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/valuer"
)

// buildNamespaceRecords assembles the page records. Pod phase counts come from
// phaseCounts in both modes; every row is a group of pods, so there's no
// per-row "current phase" concept (unlike pods/nodes list mode).
func buildNamespaceRecords(
	resp *qbtypes.QueryRangeResponse,
	pageGroups []map[string]string,
	groupBy []qbtypes.GroupByKey,
	metadataMap map[string]map[string]string,
	phaseCounts map[string]podPhaseCounts,
) []inframonitoringtypes.NamespaceRecord {
	metricsMap := parseFullQueryResponse(resp, groupBy)

	records := make([]inframonitoringtypes.NamespaceRecord, 0, len(pageGroups))
	for _, labels := range pageGroups {
		compositeKey := compositeKeyFromLabels(labels, groupBy)
		namespaceName := labels[namespaceNameAttrKey]

		record := inframonitoringtypes.NamespaceRecord{ // initialize with default values
			NamespaceName:   namespaceName,
			NamespaceCPU:    -1,
			NamespaceMemory: -1,
			Meta:            map[string]any{},
		}

		if metrics, ok := metricsMap[compositeKey]; ok {
			if v, exists := metrics["A"]; exists {
				record.NamespaceCPU = v
			}
			if v, exists := metrics["D"]; exists {
				record.NamespaceMemory = v
			}
		}

		if phaseCountsForGroup, ok := phaseCounts[compositeKey]; ok {
			record.PendingPodCount = phaseCountsForGroup.Pending
			record.RunningPodCount = phaseCountsForGroup.Running
			record.SucceededPodCount = phaseCountsForGroup.Succeeded
			record.FailedPodCount = phaseCountsForGroup.Failed
			record.UnknownPodCount = phaseCountsForGroup.Unknown
		}

		if attrs, ok := metadataMap[compositeKey]; ok {
			for k, v := range attrs {
				record.Meta[k] = v
			}
		}

		records = append(records, record)
	}
	return records
}
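// For illustration (values assumed): with groupBy = [k8s.namespace.name] and a
// page group {"k8s.namespace.name": "default"}, the composite key is derived
// from the group's label values, the metrics map yields {"A": <cpu>, "D": <memory>},
// and the resulting record carries those plus the group's pod phase counts.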

func (m *module) getTopNamespaceGroups(
	ctx context.Context,
	orgID valuer.UUID,
	req *inframonitoringtypes.PostableNamespaces,
	metadataMap map[string]map[string]string,
) ([]map[string]string, error) {
	orderByKey := req.OrderBy.Key.Name
	queryNamesForOrderBy := orderByToNamespacesQueryNames[orderByKey]
	rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]

	topReq := &qbtypes.QueryRangeRequest{
		Start:       uint64(req.Start),
		End:         uint64(req.End),
		RequestType: qbtypes.RequestTypeScalar,
		CompositeQuery: qbtypes.CompositeQuery{
			Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
		},
	}

	for _, envelope := range m.newNamespacesTableListQuery().CompositeQuery.Queries {
		if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
			continue
		}
		copied := envelope
		if copied.Type == qbtypes.QueryTypeBuilder {
			existingExpr := ""
			if f := copied.GetFilter(); f != nil {
				existingExpr = f.Expression
			}
			reqFilterExpr := ""
			if req.Filter != nil {
				reqFilterExpr = req.Filter.Expression
			}
			merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
			copied.SetFilter(&qbtypes.Filter{Expression: merged})
			copied.SetGroupBy(req.GroupBy)
		}
		topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
	}

	resp, err := m.querier.QueryRange(ctx, orgID, topReq)
	if err != nil {
		return nil, err
	}

	allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
	return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
}
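// Note (illustrative): only the queries mapped to the requested orderBy run in
// the ranking request — ordering namespaces by memory, say, issues just query
// "D" — so the ranking pass stays cheaper than computing the full record set.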

func (m *module) getNamespacesTableMetadata(ctx context.Context, req *inframonitoringtypes.PostableNamespaces) (map[string]map[string]string, error) {
	var nonGroupByAttrs []string
	for _, key := range namespaceAttrKeysForMetadata {
		if !isKeyInGroupByAttrs(req.GroupBy, key) {
			nonGroupByAttrs = append(nonGroupByAttrs, key)
		}
	}
	return m.getMetadata(ctx, namespacesTableMetricNamesList, req.GroupBy, nonGroupByAttrs, req.Filter, req.Start, req.End)
}

@@ -0,0 +1,92 @@
package implinframonitoring

import (
	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

const (
	namespaceNameAttrKey = "k8s.namespace.name"
)

var namespaceNameGroupByKey = qbtypes.GroupByKey{
	TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
		Name:          namespaceNameAttrKey,
		FieldContext:  telemetrytypes.FieldContextResource,
		FieldDataType: telemetrytypes.FieldDataTypeString,
	},
}

// namespacesTableMetricNamesList drives the existence/retention check.
// Includes k8s.pod.phase so the response short-circuits cleanly when a
// cluster doesn't ship the metric — even though phase isn't part of the
// QB composite query (it's queried separately via getPerGroupPodPhaseCounts).
var namespacesTableMetricNamesList = []string{
	"k8s.pod.cpu.usage",
	"k8s.pod.memory.working_set",
	"k8s.pod.phase",
}

var namespaceAttrKeysForMetadata = []string{
	"k8s.namespace.name",
	"k8s.cluster.name",
}

var orderByToNamespacesQueryNames = map[string][]string{
	inframonitoringtypes.NamespacesOrderByCPU:    {"A"},
	inframonitoringtypes.NamespacesOrderByMemory: {"D"},
}

// newNamespacesTableListQuery builds the composite QB v5 request for the namespaces list.
// Pod phase counts are derived separately via getPerGroupPodPhaseCounts (works for both
// list and grouped_list modes), so no phase query is included here.
// Query letters A and D are kept aligned with the v1 implementation.
func (m *module) newNamespacesTableListQuery() *qbtypes.QueryRangeRequest {
	queries := []qbtypes.QueryEnvelope{
		// Query A: CPU usage — sum of pod CPU within the group.
		{
			Type: qbtypes.QueryTypeBuilder,
			Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Name:   "A",
				Signal: telemetrytypes.SignalMetrics,
				Aggregations: []qbtypes.MetricAggregation{
					{
						MetricName:       "k8s.pod.cpu.usage",
						TimeAggregation:  metrictypes.TimeAggregationAvg,
						SpaceAggregation: metrictypes.SpaceAggregationSum,
						ReduceTo:         qbtypes.ReduceToAvg,
					},
				},
				GroupBy:  []qbtypes.GroupByKey{namespaceNameGroupByKey},
				Disabled: false,
			},
		},
		// Query D: Memory working set — sum of pod memory within the group.
		{
			Type: qbtypes.QueryTypeBuilder,
			Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Name:   "D",
				Signal: telemetrytypes.SignalMetrics,
				Aggregations: []qbtypes.MetricAggregation{
					{
						MetricName:       "k8s.pod.memory.working_set",
						TimeAggregation:  metrictypes.TimeAggregationAvg,
						SpaceAggregation: metrictypes.SpaceAggregationSum,
						ReduceTo:         qbtypes.ReduceToAvg,
					},
				},
				GroupBy:  []qbtypes.GroupByKey{namespaceNameGroupByKey},
				Disabled: false,
			},
		},
	}

	return &qbtypes.QueryRangeRequest{
		RequestType: qbtypes.RequestTypeScalar,
		CompositeQuery: qbtypes.CompositeQuery{
			Queries: queries,
		},
	}
}

pkg/modules/inframonitoring/implinframonitoring/nodes.go (new file, 299 lines)
@@ -0,0 +1,299 @@
package implinframonitoring

import (
	"context"
	"fmt"
	"slices"
	"strings"

	"github.com/SigNoz/signoz/pkg/querybuilder"
	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/huandu/go-sqlbuilder"
)

// buildNodeRecords assembles the page records. Condition counts come from
// conditionCounts in both modes. In list mode (isNodeNameInGroupBy=true) each
// group is one node, so exactly one count is 1; Condition is derived from
// which one. In grouped_list mode Condition stays NodeConditionNone.
func buildNodeRecords(
	isNodeNameInGroupBy bool,
	resp *qbtypes.QueryRangeResponse,
	pageGroups []map[string]string,
	groupBy []qbtypes.GroupByKey,
	metadataMap map[string]map[string]string,
	conditionCounts map[string]nodeConditionCounts,
) []inframonitoringtypes.NodeRecord {
	metricsMap := parseFullQueryResponse(resp, groupBy)

	records := make([]inframonitoringtypes.NodeRecord, 0, len(pageGroups))
	for _, labels := range pageGroups {
		compositeKey := compositeKeyFromLabels(labels, groupBy)
		nodeName := labels[nodeNameAttrKey]

		record := inframonitoringtypes.NodeRecord{ // initialize with default values
			NodeName:              nodeName,
			Condition:             inframonitoringtypes.NodeConditionNone,
			NodeCPU:               -1,
			NodeCPUAllocatable:    -1,
			NodeMemory:            -1,
			NodeMemoryAllocatable: -1,
			Meta:                  map[string]any{},
		}

		if metrics, ok := metricsMap[compositeKey]; ok {
			if v, exists := metrics["A"]; exists {
				record.NodeCPU = v
			}
			if v, exists := metrics["B"]; exists {
				record.NodeCPUAllocatable = v
			}
			if v, exists := metrics["C"]; exists {
				record.NodeMemory = v
			}
			if v, exists := metrics["D"]; exists {
				record.NodeMemoryAllocatable = v
			}
		}

		if conditionCountsForGroup, ok := conditionCounts[compositeKey]; ok {
			record.ReadyNodesCount = conditionCountsForGroup.Ready
			record.NotReadyNodesCount = conditionCountsForGroup.NotReady

			// In list mode each group is one node; the count==1 bucket identifies the condition.
			if isNodeNameInGroupBy {
				switch {
				case conditionCountsForGroup.Ready == 1:
					record.Condition = inframonitoringtypes.NodeConditionReady
				case conditionCountsForGroup.NotReady == 1:
					record.Condition = inframonitoringtypes.NodeConditionNotReady
				}
			}
		}

		if attrs, ok := metadataMap[compositeKey]; ok {
			for k, v := range attrs {
				record.Meta[k] = v
			}
		}

		records = append(records, record)
	}
	return records
}
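// For illustration: in list mode a group with condition counts
// {Ready: 1, NotReady: 0} yields Condition = NodeConditionReady; in
// grouped_list mode the same counts are reported as-is and Condition
// stays NodeConditionNone.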

func (m *module) getTopNodeGroups(
	ctx context.Context,
	orgID valuer.UUID,
	req *inframonitoringtypes.PostableNodes,
	metadataMap map[string]map[string]string,
) ([]map[string]string, error) {
	orderByKey := req.OrderBy.Key.Name
	queryNamesForOrderBy := orderByToNodesQueryNames[orderByKey]
	rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]

	topReq := &qbtypes.QueryRangeRequest{
		Start:       uint64(req.Start),
		End:         uint64(req.End),
		RequestType: qbtypes.RequestTypeScalar,
		CompositeQuery: qbtypes.CompositeQuery{
			Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
		},
	}

	for _, envelope := range m.newNodesTableListQuery().CompositeQuery.Queries {
		if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
			continue
		}
		copied := envelope
		if copied.Type == qbtypes.QueryTypeBuilder {
			existingExpr := ""
			if f := copied.GetFilter(); f != nil {
				existingExpr = f.Expression
			}
			reqFilterExpr := ""
			if req.Filter != nil {
				reqFilterExpr = req.Filter.Expression
			}
			merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
			copied.SetFilter(&qbtypes.Filter{Expression: merged})
			copied.SetGroupBy(req.GroupBy)
		}
		topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
	}

	resp, err := m.querier.QueryRange(ctx, orgID, topReq)
	if err != nil {
		return nil, err
	}

	allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
	return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
}

func (m *module) getNodesTableMetadata(ctx context.Context, req *inframonitoringtypes.PostableNodes) (map[string]map[string]string, error) {
	var nonGroupByAttrs []string
	for _, key := range nodeAttrKeysForMetadata {
		if !isKeyInGroupByAttrs(req.GroupBy, key) {
			nonGroupByAttrs = append(nonGroupByAttrs, key)
		}
	}
	return m.getMetadata(ctx, nodesTableMetricNamesList, req.GroupBy, nonGroupByAttrs, req.Filter, req.Start, req.End)
}

// getPerGroupNodeConditionCounts computes per-group node counts bucketed by each
// node's latest condition_ready value (0 / 1) in the requested window.
// Pipeline:
//
//	timeSeriesFPs: fp ↔ (node_name, groupBy cols) from the time_series table.
//	  User filter + page-groups filter applied here.
//	latestConditionPerNode: INNER JOIN samples × timeSeriesFPs, collapsed to
//	  the latest condition value per node via argMax(value, unix_milli).
//	countNodesPerCondition: per-group uniqExactIf into ready/not_ready buckets.
//
// Groups absent from the result map have implicit zero counts (caller default).
func (m *module) getPerGroupNodeConditionCounts(
	ctx context.Context,
	req *inframonitoringtypes.PostableNodes,
	pageGroups []map[string]string,
) (map[string]nodeConditionCounts, error) {
	if len(pageGroups) == 0 || len(req.GroupBy) == 0 {
		return map[string]nodeConditionCounts{}, nil
	}

	// Merged filter expression (user filter + page-groups IN clauses).
	reqFilterExpr := ""
	if req.Filter != nil {
		reqFilterExpr = req.Filter.Expression
	}
	pageGroupsFilterExpr := buildPageGroupsFilterExpr(pageGroups)
	filterExpr := mergeFilterExpressions(reqFilterExpr, pageGroupsFilterExpr)

	// Resolve tables. Same convention as pods.
	adjustedStart, adjustedEnd, _, localTimeSeriesTable := telemetrymetrics.WhichTSTableToUse(
		uint64(req.Start), uint64(req.End), nil,
	)
	samplesTable := telemetrymetrics.WhichSamplesTableToUse(
		uint64(req.Start), uint64(req.End),
		metrictypes.UnspecifiedType, metrictypes.TimeAggregationUnspecified, nil,
	)
	valueCol := telemetrymetrics.ValueColumnForSamplesTable(samplesTable)

	// ----- timeSeriesFPs -----
	timeSeriesFPs := sqlbuilder.NewSelectBuilder()
	timeSeriesFPsSelectCols := []string{
		"fingerprint",
		fmt.Sprintf("JSONExtractString(labels, %s) AS node_name", timeSeriesFPs.Var(nodeNameAttrKey)),
	}
	for _, key := range req.GroupBy {
		timeSeriesFPsSelectCols = append(timeSeriesFPsSelectCols,
			fmt.Sprintf("JSONExtractString(labels, %s) AS %s", timeSeriesFPs.Var(key.Name), quoteIdentifier(key.Name)),
		)
	}
	timeSeriesFPs.Select(timeSeriesFPsSelectCols...)
	timeSeriesFPs.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, localTimeSeriesTable))
	timeSeriesFPs.Where(
		timeSeriesFPs.E("metric_name", nodeConditionMetricName),
		timeSeriesFPs.GE("unix_milli", adjustedStart),
		timeSeriesFPs.L("unix_milli", adjustedEnd),
	)
	if filterExpr != "" {
		filterClause, err := m.buildFilterClause(ctx, &qbtypes.Filter{Expression: filterExpr}, req.Start, req.End)
		if err != nil {
			return nil, err
		}
		if filterClause != nil {
			timeSeriesFPs.AddWhereClause(filterClause)
		}
	}
	timeSeriesFPsGroupBy := []string{"fingerprint", "node_name"}
	for _, key := range req.GroupBy {
		timeSeriesFPsGroupBy = append(timeSeriesFPsGroupBy, quoteIdentifier(key.Name))
	}
	timeSeriesFPs.GroupBy(timeSeriesFPsGroupBy...)
	timeSeriesFPsSQL, timeSeriesFPsArgs := timeSeriesFPs.BuildWithFlavor(sqlbuilder.ClickHouse)

	// ----- latestConditionPerNode -----
	latestConditionPerNode := sqlbuilder.NewSelectBuilder()
	latestConditionPerNodeSelectCols := []string{"tsfp.node_name AS node_name"}
	latestConditionPerNodeGroupBy := []string{"node_name"}
	for _, key := range req.GroupBy {
		col := quoteIdentifier(key.Name)
		latestConditionPerNodeSelectCols = append(latestConditionPerNodeSelectCols, fmt.Sprintf("tsfp.%s AS %s", col, col))
		latestConditionPerNodeGroupBy = append(latestConditionPerNodeGroupBy, col)
	}
	latestConditionPerNodeSelectCols = append(latestConditionPerNodeSelectCols,
		fmt.Sprintf("argMax(samples.%s, samples.unix_milli) AS condition_value", valueCol),
	)
	latestConditionPerNode.Select(latestConditionPerNodeSelectCols...)
	latestConditionPerNode.From(fmt.Sprintf(
		"%s.%s AS samples INNER JOIN time_series_fps AS tsfp ON samples.fingerprint = tsfp.fingerprint",
		telemetrymetrics.DBName, samplesTable,
	))
	latestConditionPerNode.Where(
		latestConditionPerNode.E("samples.metric_name", nodeConditionMetricName),
		latestConditionPerNode.GE("samples.unix_milli", req.Start),
		latestConditionPerNode.L("samples.unix_milli", req.End),
		"tsfp.node_name != ''",
	)
	latestConditionPerNode.GroupBy(latestConditionPerNodeGroupBy...)
	latestConditionPerNodeSQL, latestConditionPerNodeArgs := latestConditionPerNode.BuildWithFlavor(sqlbuilder.ClickHouse)

	// ----- countNodesPerCondition (outer SELECT) -----
	countNodesPerConditionSelectCols := make([]string, 0, len(req.GroupBy)+2)
	countNodesPerConditionGroupBy := make([]string, 0, len(req.GroupBy))
	for _, key := range req.GroupBy {
		col := quoteIdentifier(key.Name)
		countNodesPerConditionSelectCols = append(countNodesPerConditionSelectCols, col)
		countNodesPerConditionGroupBy = append(countNodesPerConditionGroupBy, col)
	}
	countNodesPerConditionSelectCols = append(countNodesPerConditionSelectCols,
		fmt.Sprintf("uniqExactIf(node_name, condition_value = %d) AS ready_count", inframonitoringtypes.NodeConditionNumReady),
		fmt.Sprintf("uniqExactIf(node_name, condition_value = %d) AS not_ready_count", inframonitoringtypes.NodeConditionNumNotReady),
	)
	countNodesPerConditionSQL := fmt.Sprintf(
		"SELECT %s FROM latest_condition_per_node GROUP BY %s",
		strings.Join(countNodesPerConditionSelectCols, ", "),
		strings.Join(countNodesPerConditionGroupBy, ", "),
	)

	// Combine CTEs + outer.
	cteFragments := []string{
		fmt.Sprintf("time_series_fps AS (%s)", timeSeriesFPsSQL),
		fmt.Sprintf("latest_condition_per_node AS (%s)", latestConditionPerNodeSQL),
	}
	finalSQL := querybuilder.CombineCTEs(cteFragments) + countNodesPerConditionSQL
	finalArgs := querybuilder.PrependArgs([][]any{timeSeriesFPsArgs, latestConditionPerNodeArgs}, nil)

	rows, err := m.telemetryStore.ClickhouseDB().Query(ctx, finalSQL, finalArgs...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	result := make(map[string]nodeConditionCounts)
	for rows.Next() {
		groupVals := make([]string, len(req.GroupBy))
		scanPtrs := make([]any, 0, len(req.GroupBy)+2)
		for i := range groupVals {
			scanPtrs = append(scanPtrs, &groupVals[i])
		}
		var ready, notReady uint64
		scanPtrs = append(scanPtrs, &ready, &notReady)

		if err := rows.Scan(scanPtrs...); err != nil {
			return nil, err
		}
		result[compositeKeyFromList(groupVals)] = nodeConditionCounts{
			Ready:    int(ready),
			NotReady: int(notReady),
		}
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return result, nil
}
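// For illustration, with groupBy = [k8s.cluster.name] the final statement has
// roughly this shape (table names and placeholders elided; ready/not-ready
// constants assumed to be 1/0 per the doc comment above):
//
//	WITH
//	  time_series_fps AS (
//	    SELECT fingerprint, JSONExtractString(labels, 'k8s.node.name') AS node_name,
//	           JSONExtractString(labels, 'k8s.cluster.name') AS "k8s.cluster.name"
//	    FROM <db>.<time_series_table>
//	    WHERE metric_name = 'k8s.node.condition_ready' AND unix_milli >= ? AND unix_milli < ?
//	    GROUP BY fingerprint, node_name, "k8s.cluster.name"),
//	  latest_condition_per_node AS (
//	    SELECT tsfp.node_name, tsfp."k8s.cluster.name",
//	           argMax(samples.value, samples.unix_milli) AS condition_value
//	    FROM <db>.<samples_table> AS samples
//	    INNER JOIN time_series_fps AS tsfp ON samples.fingerprint = tsfp.fingerprint
//	    WHERE ... GROUP BY node_name, "k8s.cluster.name")
//	SELECT "k8s.cluster.name",
//	       uniqExactIf(node_name, condition_value = 1) AS ready_count,
//	       uniqExactIf(node_name, condition_value = 0) AS not_ready_count
//	FROM latest_condition_per_node GROUP BY "k8s.cluster.name"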

@@ -0,0 +1,134 @@
package implinframonitoring

import (
	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

const (
	nodeNameAttrKey         = "k8s.node.name"
	nodeConditionMetricName = "k8s.node.condition_ready"
)

var nodeNameGroupByKey = qbtypes.GroupByKey{
	TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
		Name:          nodeNameAttrKey,
		FieldContext:  telemetrytypes.FieldContextResource,
		FieldDataType: telemetrytypes.FieldDataTypeString,
	},
}

// nodesTableMetricNamesList drives the existence/retention check.
// Includes condition_ready so the response short-circuits cleanly when a
// cluster doesn't ship the metric — even though condition_ready isn't part
// of the QB composite query (it's queried separately via getPerGroupNodeConditionCounts).
var nodesTableMetricNamesList = []string{
	"k8s.node.cpu.usage",
	"k8s.node.allocatable_cpu",
	"k8s.node.memory.working_set",
	"k8s.node.allocatable_memory",
	"k8s.node.condition_ready",
}

var nodeAttrKeysForMetadata = []string{
	"k8s.node.uid",
	"k8s.cluster.name",
}

var orderByToNodesQueryNames = map[string][]string{
	inframonitoringtypes.NodesOrderByCPU:               {"A"},
	inframonitoringtypes.NodesOrderByCPUAllocatable:    {"B"},
	inframonitoringtypes.NodesOrderByMemory:            {"C"},
	inframonitoringtypes.NodesOrderByMemoryAllocatable: {"D"},
}

// newNodesTableListQuery builds the composite QB v5 request for the nodes list.
// Node condition is derived separately via getPerGroupNodeConditionCounts (works
// for both list and grouped_list modes), so no condition query is included here.
func (m *module) newNodesTableListQuery() *qbtypes.QueryRangeRequest {
	queries := []qbtypes.QueryEnvelope{
		// Query A: CPU usage
		{
			Type: qbtypes.QueryTypeBuilder,
			Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Name:   "A",
				Signal: telemetrytypes.SignalMetrics,
				Aggregations: []qbtypes.MetricAggregation{
					{
						MetricName:       "k8s.node.cpu.usage",
						TimeAggregation:  metrictypes.TimeAggregationAvg,
						SpaceAggregation: metrictypes.SpaceAggregationSum,
						ReduceTo:         qbtypes.ReduceToAvg,
					},
				},
				GroupBy:  []qbtypes.GroupByKey{nodeNameGroupByKey},
				Disabled: false,
			},
		},
		// Query B: CPU allocatable.
		// TimeAggregationLatest is the closest v5 equivalent of v1's AnyLast;
		// allocatable values change rarely so divergence in practice is negligible.
		{
			Type: qbtypes.QueryTypeBuilder,
			Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Name:   "B",
				Signal: telemetrytypes.SignalMetrics,
				Aggregations: []qbtypes.MetricAggregation{
					{
						MetricName:       "k8s.node.allocatable_cpu",
						TimeAggregation:  metrictypes.TimeAggregationLatest,
						SpaceAggregation: metrictypes.SpaceAggregationSum,
						ReduceTo:         qbtypes.ReduceToAvg,
					},
				},
				GroupBy:  []qbtypes.GroupByKey{nodeNameGroupByKey},
				Disabled: false,
			},
		},
		// Query C: Memory working set
		{
			Type: qbtypes.QueryTypeBuilder,
			Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Name:   "C",
				Signal: telemetrytypes.SignalMetrics,
				Aggregations: []qbtypes.MetricAggregation{
					{
						MetricName:       "k8s.node.memory.working_set",
						TimeAggregation:  metrictypes.TimeAggregationAvg,
						SpaceAggregation: metrictypes.SpaceAggregationSum,
						ReduceTo:         qbtypes.ReduceToAvg,
					},
				},
				GroupBy:  []qbtypes.GroupByKey{nodeNameGroupByKey},
				Disabled: false,
			},
		},
		// Query D: Memory allocatable. Same Latest caveat as Query B.
		{
			Type: qbtypes.QueryTypeBuilder,
			Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
				Name:   "D",
				Signal: telemetrytypes.SignalMetrics,
				Aggregations: []qbtypes.MetricAggregation{
					{
						MetricName:       "k8s.node.allocatable_memory",
						TimeAggregation:  metrictypes.TimeAggregationLatest,
						SpaceAggregation: metrictypes.SpaceAggregationSum,
						ReduceTo:         qbtypes.ReduceToAvg,
					},
				},
				GroupBy:  []qbtypes.GroupByKey{nodeNameGroupByKey},
				Disabled: false,
			},
		},
	}

	return &qbtypes.QueryRangeRequest{
		RequestType: qbtypes.RequestTypeScalar,
		CompositeQuery: qbtypes.CompositeQuery{
			Queries: queries,
		},
	}
}

@@ -11,9 +11,13 @@ import (
 type Handler interface {
 	ListHosts(http.ResponseWriter, *http.Request)
 	ListPods(http.ResponseWriter, *http.Request)
+	ListNodes(http.ResponseWriter, *http.Request)
+	ListNamespaces(http.ResponseWriter, *http.Request)
 }

 type Module interface {
 	ListHosts(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableHosts) (*inframonitoringtypes.Hosts, error)
 	ListPods(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostablePods) (*inframonitoringtypes.Pods, error)
+	ListNodes(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableNodes) (*inframonitoringtypes.Nodes, error)
+	ListNamespaces(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableNamespaces) (*inframonitoringtypes.Namespaces, error)
 }

@@ -1,53 +0,0 @@
package impltag

import (
	"context"

	"github.com/SigNoz/signoz/pkg/modules/tag"
	"github.com/SigNoz/signoz/pkg/types/tagtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type module struct {
	store tagtypes.Store
}

func NewModule(store tagtypes.Store) tag.Module {
	return &module{store: store}
}

func (m *module) CreateMany(ctx context.Context, orgID valuer.UUID, postable []tagtypes.PostableTag, createdBy string) ([]*tagtypes.Tag, error) {
	if len(postable) == 0 {
		return []*tagtypes.Tag{}, nil
	}

	toCreate, matched, err := tagtypes.Resolve(ctx, m.store, orgID, postable, createdBy)
	if err != nil {
		return nil, err
	}

	created, err := m.store.Create(ctx, toCreate)
	if err != nil {
		return nil, err
	}

	return append(matched, created...), nil
}

func (m *module) LinkToEntity(ctx context.Context, orgID valuer.UUID, entityType tagtypes.EntityType, entityID valuer.UUID, tagIDs []valuer.UUID) error {
	if len(tagIDs) == 0 {
		return nil
	}
	return m.store.CreateRelations(ctx, tagtypes.NewTagRelations(orgID, entityType, entityID, tagIDs))
}

func (m *module) SyncLinksForEntity(ctx context.Context, orgID valuer.UUID, entityType tagtypes.EntityType, entityID valuer.UUID, tagIDs []valuer.UUID) error {
	if err := m.store.CreateRelations(ctx, tagtypes.NewTagRelations(orgID, entityType, entityID, tagIDs)); err != nil {
		return err
	}
	return m.store.DeleteRelationsExcept(ctx, entityID, tagIDs)
}

func (m *module) ListForEntity(ctx context.Context, entityID valuer.UUID) ([]*tagtypes.Tag, error) {
	return m.store.ListByEntity(ctx, entityID)
}

@@ -1,95 +0,0 @@
package impltag

import (
	"context"

	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types/tagtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/uptrace/bun"
)

type store struct {
	sqlstore sqlstore.SQLStore
}

func NewStore(sqlstore sqlstore.SQLStore) tagtypes.Store {
	return &store{sqlstore: sqlstore}
}

func (s *store) List(ctx context.Context, orgID valuer.UUID) ([]*tagtypes.Tag, error) {
	tags := make([]*tagtypes.Tag, 0)
	err := s.sqlstore.
		BunDBCtx(ctx).
		NewSelect().
		Model(&tags).
		Where("org_id = ?", orgID).
		Scan(ctx)
	if err != nil {
		return nil, err
	}
	return tags, nil
}

func (s *store) ListByEntity(ctx context.Context, entityID valuer.UUID) ([]*tagtypes.Tag, error) {
	tags := make([]*tagtypes.Tag, 0)
	err := s.sqlstore.
		BunDBCtx(ctx).
		NewSelect().
		Model(&tags).
		Join("JOIN tag_relations AS tr ON tr.tag_id = tag.id").
		Where("tr.entity_id = ?", entityID).
		Scan(ctx)
	if err != nil {
		return nil, err
	}
	return tags, nil
}

func (s *store) Create(ctx context.Context, tags []*tagtypes.Tag) ([]*tagtypes.Tag, error) {
	if len(tags) == 0 {
		return tags, nil
	}
	// DO UPDATE on a self-set is a deliberate no-op write whose only purpose
	// is to make RETURNING fire on conflicting rows. Without it, RETURNING is
	// silent on the conflict path and we'd have to refetch by internal name to
	// learn the existing rows' IDs after a concurrent-insert race.
	err := s.sqlstore.
		BunDBCtx(ctx).
		NewInsert().
		Model(&tags).
		On("CONFLICT (org_id, internal_name) DO UPDATE").
		Set("internal_name = EXCLUDED.internal_name").
		Returning("*").
		Scan(ctx)
	if err != nil {
		return nil, err
	}
	return tags, nil
}
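// For illustration, the generated upsert is roughly (SQLite/Postgres flavor,
// column list abridged):
//
//	INSERT INTO tag (id, name, internal_name, org_id, ...) VALUES (...)
//	ON CONFLICT (org_id, internal_name) DO UPDATE SET internal_name = EXCLUDED.internal_name
//	RETURNING *
//
// so conflicting rows come back with their existing IDs instead of the
// pre-generated ones.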

func (s *store) CreateRelations(ctx context.Context, relations []*tagtypes.TagRelation) error {
	if len(relations) == 0 {
		return nil
	}
	_, err := s.sqlstore.
		BunDBCtx(ctx).
		NewInsert().
		Model(&relations).
		On("CONFLICT (entity_id, tag_id) DO NOTHING").
		Exec(ctx)
	return err
}

func (s *store) DeleteRelationsExcept(ctx context.Context, entityID valuer.UUID, keepTagIDs []valuer.UUID) error {
	q := s.sqlstore.
		BunDBCtx(ctx).
		NewDelete().
		Model((*tagtypes.TagRelation)(nil)).
		Where("entity_id = ?", entityID)
	if len(keepTagIDs) > 0 {
		q = q.Where("tag_id NOT IN (?)", bun.In(keepTagIDs))
	}
	_, err := q.Exec(ctx)
	return err
}
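// Together these two calls implement set reconciliation for an entity's tags.
// Illustrative example: existing links {a, b}, desired {b, c} — CreateRelations
// inserts c (b hits DO NOTHING), then DeleteRelationsExcept removes a.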

@@ -1,140 +0,0 @@
package impltag

import (
	"context"
	"path/filepath"
	"testing"
	"time"

	"github.com/SigNoz/signoz/pkg/factory/factorytest"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/sqlitesqlstore"
	"github.com/SigNoz/signoz/pkg/types/tagtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/uptrace/bun"
)

func newTestStore(t *testing.T) sqlstore.SQLStore {
	t.Helper()
	dbPath := filepath.Join(t.TempDir(), "test.db")
	store, err := sqlitesqlstore.New(context.Background(), factorytest.NewSettings(), sqlstore.Config{
		Provider: "sqlite",
		Connection: sqlstore.ConnectionConfig{
			MaxOpenConns:    1,
			MaxConnLifetime: 0,
		},
		Sqlite: sqlstore.SqliteConfig{
			Path:            dbPath,
			Mode:            "wal",
			BusyTimeout:     5 * time.Second,
			TransactionMode: "deferred",
		},
	})
	require.NoError(t, err)

	_, err = store.BunDB().NewCreateTable().
		Model((*tagtypes.Tag)(nil)).
		IfNotExists().
		Exec(context.Background())
	require.NoError(t, err)

	_, err = store.BunDB().Exec(`CREATE UNIQUE INDEX IF NOT EXISTS uq_tag_org_id_internal_name ON tag (org_id, internal_name)`)
	require.NoError(t, err)
	return store
}

func tagsByInternalName(t *testing.T, db *bun.DB) map[string]*tagtypes.Tag {
	t.Helper()
	all := make([]*tagtypes.Tag, 0)
	require.NoError(t, db.NewSelect().Model(&all).Scan(context.Background()))
	out := map[string]*tagtypes.Tag{}
	for _, tag := range all {
		out[tag.InternalName] = tag
	}
	return out
}

func TestStore_Create_PopulatesIDsOnFreshInsert(t *testing.T) {
	ctx := context.Background()
	sqlstore := newTestStore(t)
	s := NewStore(sqlstore)

	orgID := valuer.GenerateUUID()
	tagA := tagtypes.NewTag(orgID, "Database", "database", "u@signoz.io")
	tagB := tagtypes.NewTag(orgID, "team/BLR", "team::blr", "u@signoz.io")
	preIDA := tagA.ID
	preIDB := tagB.ID

	got, err := s.Create(ctx, []*tagtypes.Tag{tagA, tagB})
	require.NoError(t, err)
	require.Len(t, got, 2)

	// No race → pre-generated IDs stand. The slice is what we passed in,
	// confirming Scan didn't reallocate.
	assert.Equal(t, preIDA, got[0].ID)
	assert.Equal(t, preIDB, got[1].ID)

	// And the rows are in the DB.
	stored := tagsByInternalName(t, sqlstore.BunDB())
	require.Contains(t, stored, "database")
	require.Contains(t, stored, "team::blr")
	assert.Equal(t, preIDA, stored["database"].ID)
	assert.Equal(t, preIDB, stored["team::blr"].ID)
}

func TestStore_Create_ConflictReturnsExistingRowID(t *testing.T) {
	ctx := context.Background()
	sqlstore := newTestStore(t)
	s := NewStore(sqlstore)

	orgID := valuer.GenerateUUID()

	// Simulate a concurrent insert: someone else has already inserted "database".
	winner := tagtypes.NewTag(orgID, "Database", "database", "concurrent")
	_, err := s.Create(ctx, []*tagtypes.Tag{winner})
	require.NoError(t, err)
	winnerID := winner.ID

	// Now our request runs with a different pre-generated ID for the same
	// internal name. RETURNING should overwrite our stale ID with winner's ID.
	loser := tagtypes.NewTag(orgID, "Database", "database", "u@signoz.io")
	loserPreID := loser.ID
	require.NotEqual(t, winnerID, loserPreID, "pre-generated IDs must differ for this test to be meaningful")

	got, err := s.Create(ctx, []*tagtypes.Tag{loser})
	require.NoError(t, err)
	require.Len(t, got, 1)

	assert.Equal(t, winnerID, got[0].ID, "returned slice should carry the existing row's ID, not our stale one")
	assert.Equal(t, winnerID, loser.ID, "input slice element is mutated in place")

	// And the DB still has exactly one row for that internal name — winner's.
	stored := tagsByInternalName(t, sqlstore.BunDB())
	require.Len(t, stored, 1)
	assert.Equal(t, winnerID, stored["database"].ID)
}

func TestStore_Create_MixedFreshAndConflict(t *testing.T) {
	ctx := context.Background()
	sqlstore := newTestStore(t)
	s := NewStore(sqlstore)

	orgID := valuer.GenerateUUID()
	pre := tagtypes.NewTag(orgID, "Database", "database", "concurrent")
	_, err := s.Create(ctx, []*tagtypes.Tag{pre})
	require.NoError(t, err)
	preExistingID := pre.ID

	conflict := tagtypes.NewTag(orgID, "Database", "database", "u@signoz.io")
	fresh := tagtypes.NewTag(orgID, "team/BLR", "team::blr", "u@signoz.io")
	freshPreID := fresh.ID

	got, err := s.Create(ctx, []*tagtypes.Tag{conflict, fresh})
	require.NoError(t, err)
	require.Len(t, got, 2)

	assert.Equal(t, preExistingID, got[0].ID, "conflicting row's ID overwritten with the existing row's")
	assert.Equal(t, freshPreID, got[1].ID, "fresh row's pre-generated ID is preserved")
}

@@ -1,28 +0,0 @@
package tag

import (
	"context"

	"github.com/SigNoz/signoz/pkg/types/tagtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type Module interface {
	// CreateMany resolves user-supplied tag names against the existing tags for the
	// org — reusing the casing of any existing parent tag so that
	// "teams/blr/platform" inherits the "BLR" casing from a pre-existing
	// "teams/BLR" tag — and inserts any tags that don't yet exist.
	//
	// Does not link the resolved tags to any entity — call LinkToEntity for that.
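	//
	// Illustrative example: with "teams/BLR" already stored, a postable
	// "teams/blr/platform" resolves to a new "teams/BLR/platform" tag, while
	// "teams/blr" resolves to the existing "teams/BLR" row unchanged.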
	CreateMany(ctx context.Context, orgID valuer.UUID, postable []tagtypes.PostableTag, createdBy string) ([]*tagtypes.Tag, error)

	// LinkToEntity inserts (entity, tag) rows in tag_relations. Existing rows
	// are left untouched. Uses the caller's transaction context if any so that
	// it can be made atomic with the entity row insert.
	LinkToEntity(ctx context.Context, orgID valuer.UUID, entityType tagtypes.EntityType, entityID valuer.UUID, tagIDs []valuer.UUID) error

	// SyncLinksForEntity reconciles the entity's links to exactly tagIDs:
	// missing links are inserted, obsolete ones removed.
	SyncLinksForEntity(ctx context.Context, orgID valuer.UUID, entityType tagtypes.EntityType, entityID valuer.UUID, tagIDs []valuer.UUID) error

	ListForEntity(ctx context.Context, entityID valuer.UUID) ([]*tagtypes.Tag, error)
}

@@ -16,7 +16,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
 	"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
-	"github.com/SigNoz/signoz/pkg/modules/tag/impltag"
 	"github.com/SigNoz/signoz/pkg/modules/user/impluser"
 	"github.com/SigNoz/signoz/pkg/querier"
 	"github.com/SigNoz/signoz/pkg/queryparser"
@@ -45,8 +44,7 @@ func TestNewHandlers(t *testing.T) {
 	emailing := emailingtest.New()
 	queryParser := queryparser.New(providerSettings)
 	require.NoError(t, err)
-	tagModule := impltag.NewModule(impltag.NewStore(sqlstore))
-	dashboardModule := impldashboard.NewModule(impldashboard.NewStore(sqlstore), sqlstore, providerSettings, nil, orgGetter, queryParser, tagModule)
+	dashboardModule := impldashboard.NewModule(impldashboard.NewStore(sqlstore), providerSettings, nil, orgGetter, queryParser)

 	flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
 	require.NoError(t, err)
@@ -54,7 +52,7 @@ func TestNewHandlers(t *testing.T) {
 	userRoleStore := impluser.NewUserRoleStore(sqlstore, providerSettings)

 	userGetter := impluser.NewGetter(impluser.NewStore(sqlstore, providerSettings), userRoleStore, flagger)
-	modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, nil, nil, flagger, tagModule)
+	modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, nil, nil, flagger)

 	querierHandler := querier.NewHandler(providerSettings, nil, nil)
 	registryHandler := factory.NewHandler(nil)

@@ -40,7 +40,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/modules/session/implsession"
 	"github.com/SigNoz/signoz/pkg/modules/spanpercentile"
 	"github.com/SigNoz/signoz/pkg/modules/spanpercentile/implspanpercentile"
-	"github.com/SigNoz/signoz/pkg/modules/tag"
 	"github.com/SigNoz/signoz/pkg/modules/tracedetail"
 	"github.com/SigNoz/signoz/pkg/modules/tracedetail/impltracedetail"
 	"github.com/SigNoz/signoz/pkg/modules/tracefunnel"
@@ -81,7 +80,6 @@ type Modules struct {
 	CloudIntegration cloudintegration.Module
 	RuleStateHistory rulestatehistory.Module
 	TraceDetail      tracedetail.Module
-	Tag              tag.Module
 }

 func NewModules(
@@ -106,7 +104,6 @@ func NewModules(
 	serviceAccount serviceaccount.Module,
 	cloudIntegrationModule cloudintegration.Module,
 	fl flagger.Flagger,
-	tagModule tag.Module,
 ) Modules {
 	quickfilter := implquickfilter.NewModule(implquickfilter.NewStore(sqlstore))
 	orgSetter := implorganization.NewSetter(implorganization.NewStore(sqlstore), alertmanager, quickfilter)
@@ -136,6 +133,5 @@ func NewModules(
 		RuleStateHistory: implrulestatehistory.NewModule(implrulestatehistory.NewStore(telemetryStore, telemetryMetadataStore, providerSettings.Logger)),
 		CloudIntegration: cloudIntegrationModule,
 		TraceDetail:      impltracedetail.NewModule(impltracedetail.NewTraceStore(telemetryStore), providerSettings, config.TraceDetail),
-		Tag:              tagModule,
 	}
 }

@@ -18,7 +18,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
 	"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
 	"github.com/SigNoz/signoz/pkg/modules/serviceaccount/implserviceaccount"
-	"github.com/SigNoz/signoz/pkg/modules/tag/impltag"
 	"github.com/SigNoz/signoz/pkg/modules/user/impluser"
 	"github.com/SigNoz/signoz/pkg/queryparser"
 	"github.com/SigNoz/signoz/pkg/sharder"
@@ -46,8 +45,7 @@ func TestNewModules(t *testing.T) {
 	emailing := emailingtest.New()
 	queryParser := queryparser.New(providerSettings)
 	require.NoError(t, err)
-	tagModule := impltag.NewModule(impltag.NewStore(sqlstore))
-	dashboardModule := impldashboard.NewModule(impldashboard.NewStore(sqlstore), sqlstore, providerSettings, nil, orgGetter, queryParser, tagModule)
+	dashboardModule := impldashboard.NewModule(impldashboard.NewStore(sqlstore), providerSettings, nil, orgGetter, queryParser)

 	flagger, err := flagger.New(context.Background(), instrumentationtest.New().ToProviderSettings(), flagger.Config{}, flagger.MustNewRegistry())
 	require.NoError(t, err)
@@ -58,7 +56,7 @@ func TestNewModules(t *testing.T) {

 	serviceAccount := implserviceaccount.NewModule(implserviceaccount.NewStore(sqlstore), nil, nil, nil, providerSettings, serviceaccount.Config{})

-	modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, serviceAccount, implcloudintegration.NewModule(), flagger, tagModule)
+	modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, nil, nil, nil, nil, nil, nil, nil, queryParser, Config{}, dashboardModule, userGetter, userRoleStore, serviceAccount, implcloudintegration.NewModule(), flagger)

 	reflectVal := reflect.ValueOf(modules)
 	for i := 0; i < reflectVal.NumField(); i++ {

@@ -195,8 +195,6 @@ func NewSQLMigrationProviderFactories(
 		sqlmigration.NewServiceAccountAuthzactory(sqlstore),
 		sqlmigration.NewDropUserDeletedAtFactory(sqlstore, sqlschema),
 		sqlmigration.NewMigrateAWSAllRegionsFactory(sqlstore),
-		sqlmigration.NewAddTagsFactory(sqlstore, sqlschema),
-		sqlmigration.NewAddDashboardSoftDeleteFactory(sqlstore, sqlschema),
 	)
 }

@@ -29,8 +29,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/modules/rulestatehistory"
 	"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
 	"github.com/SigNoz/signoz/pkg/modules/serviceaccount/implserviceaccount"
-	"github.com/SigNoz/signoz/pkg/modules/tag"
-	"github.com/SigNoz/signoz/pkg/modules/tag/impltag"
 	"github.com/SigNoz/signoz/pkg/modules/user/impluser"
 	"github.com/SigNoz/signoz/pkg/prometheus"
 	"github.com/SigNoz/signoz/pkg/querier"
@@ -103,7 +101,7 @@ func New(
 	telemetrystoreProviderFactories factory.NamedMap[factory.ProviderFactory[telemetrystore.TelemetryStore, telemetrystore.Config]],
 	authNsCallback func(ctx context.Context, providerSettings factory.ProviderSettings, store authtypes.AuthNStore, licensing licensing.Licensing) (map[authtypes.AuthNProvider]authn.AuthN, error),
 	authzCallback func(context.Context, sqlstore.SQLStore, licensing.Licensing, []authz.OnBeforeRoleDelete, dashboard.Module) (factory.ProviderFactory[authz.AuthZ, authz.Config], error),
-	dashboardModuleCallback func(sqlstore.SQLStore, factory.ProviderSettings, analytics.Analytics, organization.Getter, queryparser.QueryParser, querier.Querier, licensing.Licensing, tag.Module) dashboard.Module,
+	dashboardModuleCallback func(sqlstore.SQLStore, factory.ProviderSettings, analytics.Analytics, organization.Getter, queryparser.QueryParser, querier.Querier, licensing.Licensing) dashboard.Module,
 	gatewayProviderFactory func(licensing.Licensing) factory.ProviderFactory[gateway.Gateway, gateway.Config],
 	auditorProviderFactories func(licensing.Licensing) factory.NamedMap[factory.ProviderFactory[auditor.Auditor, auditor.Config]],
 	querierHandlerCallback func(factory.ProviderSettings, querier.Querier, analytics.Analytics) querier.Handler,
@@ -327,13 +325,8 @@ func New(
 	// Initialize query parser (needed for dashboard module)
 	queryParser := queryparser.New(providerSettings)

-	// Initialize tag module — shared across modules that link entities to tags
-	// (currently dashboard; future: alerts, RBAC). Built once here and injected
-	// where needed.
-	tagModule := impltag.NewModule(impltag.NewStore(sqlstore))
-
 	// Initialize dashboard module (needed for authz registry)
-	dashboard := dashboardModuleCallback(sqlstore, providerSettings, analytics, orgGetter, queryParser, querier, licensing, tagModule)
+	dashboard := dashboardModuleCallback(sqlstore, providerSettings, analytics, orgGetter, queryParser, querier, licensing)

 	// Initialize user getter
 	userGetter := impluser.NewGetter(userStore, userRoleStore, flagger)
@@ -448,7 +441,7 @@ func New(
 	}

 	// Initialize all modules
-	modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter, userRoleStore, serviceAccount, cloudIntegrationModule, flagger, tagModule)
+	modules := NewModules(sqlstore, tokenizer, emailing, providerSettings, orgGetter, alertmanager, analytics, querier, telemetrystore, telemetryMetadataStore, authNs, authz, cache, queryParser, config, dashboard, userGetter, userRoleStore, serviceAccount, cloudIntegrationModule, flagger)

 	// Initialize ruler from the variant-specific provider factories
 	rulerInstance, err := factory.NewProviderFromNamedMap(ctx, providerSettings, config.Ruler, rulerProviderFactories(cache, alertmanager, sqlstore, telemetrystore, telemetryMetadataStore, prometheus, orgGetter, modules.RuleStateHistory, querier, queryParser), "signoz")
@@ -1,103 +0,0 @@
|
||||
package sqlmigration
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/sqlschema"
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/migrate"
|
||||
)
|
||||
|
||||
type addTags struct {
|
||||
sqlstore sqlstore.SQLStore
|
||||
sqlschema sqlschema.SQLSchema
|
||||
}
|
||||
|
||||
func NewAddTagsFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
|
||||
return factory.NewProviderFactory(factory.MustNewName("add_tags"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
|
||||
return &addTags{
|
||||
sqlstore: sqlstore,
|
||||
sqlschema: sqlschema,
|
||||
}, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (migration *addTags) Register(migrations *migrate.Migrations) error {
|
||||
return migrations.Register(migration.Up, migration.Down)
|
||||
}
|
||||
|
||||
func (migration *addTags) Up(ctx context.Context, db *bun.DB) error {
|
||||
tx, err := db.BeginTx(ctx, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = tx.Rollback()
|
||||
}()
|
||||
|
||||
sqls := [][]byte{}
|
||||
|
||||
tagTableSQLs := migration.sqlschema.Operator().CreateTable(&sqlschema.Table{
|
||||
Name: "tag",
|
||||
Columns: []*sqlschema.Column{
|
||||
{Name: "id", DataType: sqlschema.DataTypeText, Nullable: false},
|
||||
{Name: "name", DataType: sqlschema.DataTypeText, Nullable: false},
|
||||
{Name: "internal_name", DataType: sqlschema.DataTypeText, Nullable: false},
|
||||
{Name: "org_id", DataType: sqlschema.DataTypeText, Nullable: false},
|
||||
{Name: "created_at", DataType: sqlschema.DataTypeTimestamp, Nullable: false},
|
||||
{Name: "created_by", DataType: sqlschema.DataTypeText, Nullable: true},
|
||||
{Name: "updated_at", DataType: sqlschema.DataTypeTimestamp, Nullable: false},
|
||||
{Name: "updated_by", DataType: sqlschema.DataTypeText, Nullable: true},
|
||||
},
|
||||
PrimaryKeyConstraint: &sqlschema.PrimaryKeyConstraint{ColumnNames: []sqlschema.ColumnName{"id"}},
|
||||
ForeignKeyConstraints: []*sqlschema.ForeignKeyConstraint{
|
||||
{
|
||||
ReferencingColumnName: sqlschema.ColumnName("org_id"),
|
||||
ReferencedTableName: sqlschema.TableName("organizations"),
|
||||
ReferencedColumnName: sqlschema.ColumnName("id"),
|
||||
},
|
||||
},
|
||||
})
|
||||
sqls = append(sqls, tagTableSQLs...)
|
||||
|
||||
tagUniqueIndexSQL := migration.sqlschema.Operator().CreateIndex(
|
||||
&sqlschema.UniqueIndex{
|
||||
TableName: "tag",
|
||||
ColumnNames: []sqlschema.ColumnName{"org_id", "internal_name"},
|
||||
})
|
||||
sqls = append(sqls, tagUniqueIndexSQL...)
|
||||
|
||||
tagRelationsTableSQLs := migration.sqlschema.Operator().CreateTable(&sqlschema.Table{
|
||||
Name: "tag_relations",
|
||||
Columns: []*sqlschema.Column{
|
||||
{Name: "entity_type", DataType: sqlschema.DataTypeText, Nullable: false},
|
||||
{Name: "entity_id", DataType: sqlschema.DataTypeText, Nullable: false},
|
||||
{Name: "tag_id", DataType: sqlschema.DataTypeText, Nullable: false},
|
||||
{Name: "org_id", DataType: sqlschema.DataTypeText, Nullable: false},
|
||||
},
|
||||
PrimaryKeyConstraint: &sqlschema.PrimaryKeyConstraint{ColumnNames: []sqlschema.ColumnName{"entity_id", "tag_id"}},
|
||||
ForeignKeyConstraints: []*sqlschema.ForeignKeyConstraint{
|
||||
{
|
||||
ReferencingColumnName: sqlschema.ColumnName("org_id"),
|
||||
ReferencedTableName: sqlschema.TableName("organizations"),
|
||||
ReferencedColumnName: sqlschema.ColumnName("id"),
|
||||
},
|
||||
},
|
||||
})
|
||||
sqls = append(sqls, tagRelationsTableSQLs...)
|
||||
|
||||
for _, sql := range sqls {
|
||||
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
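// For illustration, on SQLite the generated DDL is roughly (exact output
// depends on the sqlschema operator):
//
//	CREATE TABLE tag (id TEXT NOT NULL, name TEXT NOT NULL, internal_name TEXT NOT NULL,
//	  org_id TEXT NOT NULL, ..., PRIMARY KEY (id),
//	  FOREIGN KEY (org_id) REFERENCES organizations (id));
//	CREATE UNIQUE INDEX uq_tag_org_id_internal_name ON tag (org_id, internal_name);
//	CREATE TABLE tag_relations (entity_type TEXT NOT NULL, entity_id TEXT NOT NULL,
//	  tag_id TEXT NOT NULL, org_id TEXT NOT NULL, PRIMARY KEY (entity_id, tag_id),
//	  FOREIGN KEY (org_id) REFERENCES organizations (id));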

func (migration *addTags) Down(_ context.Context, _ *bun.DB) error {
	return nil
}
@@ -1,59 +0,0 @@
package sqlmigration

import (
	"context"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/sqlschema"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

type addDashboardSoftDelete struct {
	sqlstore  sqlstore.SQLStore
	sqlschema sqlschema.SQLSchema
}

func NewAddDashboardSoftDeleteFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
	return factory.NewProviderFactory(factory.MustNewName("add_dashboard_soft_delete"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
		return &addDashboardSoftDelete{
			sqlstore:  sqlstore,
			sqlschema: sqlschema,
		}, nil
	})
}

func (migration *addDashboardSoftDelete) Register(migrations *migrate.Migrations) error {
	return migrations.Register(migration.Up, migration.Down)
}

func (migration *addDashboardSoftDelete) Up(ctx context.Context, db *bun.DB) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer func() { _ = tx.Rollback() }()

	dashboardTable := &sqlschema.Table{Name: "dashboard"}
	sqls := [][]byte{}
	sqls = append(sqls, migration.sqlschema.Operator().AddColumn(
		dashboardTable, nil,
		&sqlschema.Column{Name: "deleted_at", DataType: sqlschema.DataTypeTimestamp, Nullable: true}, nil,
	)...)
	sqls = append(sqls, migration.sqlschema.Operator().AddColumn(
		dashboardTable, nil,
		&sqlschema.Column{Name: "deleted_by", DataType: sqlschema.DataTypeText, Nullable: true}, nil,
	)...)

	for _, sql := range sqls {
		if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
			return err
		}
	}
	return tx.Commit()
}

func (migration *addDashboardSoftDelete) Down(_ context.Context, _ *bun.DB) error {
	return nil
}
@@ -11,8 +11,3 @@ type UserAuditable struct {
    CreatedBy string `bun:"created_by,type:text" json:"createdBy"`
    UpdatedBy string `bun:"updated_by,type:text" json:"updatedBy"`
}

type DeleteAuditable struct {
    DeletedBy string    `bun:"deleted_by,type:text,nullzero" json:"deletedBy,omitempty"`
    DeletedAt time.Time `bun:"deleted_at,nullzero" json:"deletedAt,omitzero"`
}
@@ -11,7 +11,6 @@ import (
    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/SigNoz/signoz/pkg/types/tagtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
    "github.com/uptrace/bun"
)
@@ -20,8 +19,6 @@ var (
    TypeableMetaResourceDashboard       = authtypes.MustNewTypeableMetaResource(authtypes.MustNewName("dashboard"))
    TypeableMetaResourcePublicDashboard = authtypes.MustNewTypeableMetaResource(authtypes.MustNewName("public-dashboard"))
    TypeableMetaResourcesDashboards     = authtypes.MustNewTypeableMetaResources(authtypes.MustNewName("dashboards"))

    EntityTypeDashboard = tagtypes.MustNewEntityType("dashboard")
)

var (
@@ -37,7 +34,6 @@ type StorableDashboard struct {
    types.Identifiable
    types.TimeAuditable
    types.UserAuditable
    types.DeleteAuditable
    Data   StorableDashboardData `bun:"data,type:text,notnull"`
    Locked bool                  `bun:"locked,notnull,default:false"`
    OrgID  valuer.UUID           `bun:"org_id,notnull"`
262 pkg/types/dashboardtypes/dashboard_v2.go Normal file
@@ -0,0 +1,262 @@
package dashboardtypes

import (
    "bytes"
    "encoding/json"
    "fmt"
    "slices"
    "strings"

    "github.com/SigNoz/signoz/pkg/errors"
    qb "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/go-playground/validator/v10"
    v1 "github.com/perses/perses/pkg/model/api/v1"
    "github.com/perses/perses/pkg/model/api/v1/common"
    "github.com/perses/perses/pkg/model/api/v1/dashboard"
)

// StorableDashboardDataV2 wraps v1.DashboardSpec (Perses) with additional SigNoz-specific fields.
//
// We embed DashboardSpec (not v1.Dashboard) to avoid carrying Perses's Metadata
// (Name, Project, CreatedAt, UpdatedAt, Tags, Version) and Kind field. SigNoz
// manages identity (ID), timestamps (TimeAuditable), and multi-tenancy (OrgID)
// separately on StorableDashboardV2/DashboardV2.
//
// The following v1 request fields map to locations inside v1.DashboardSpec:
//   - title → Display.Name (common.Display)
//   - description → Display.Description (common.Display)
//
// Fields that have no Perses equivalent will be added in this wrapper (like image, uploadGrafana, etc.)
type StorableDashboardDataV2 = v1.DashboardSpec
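
Illustrative sketch (not part of this diff) of the title/description mapping described in the comment above; it assumes common.Display carries Name and Description fields, as the mapping implies:

    // Hypothetical values; only the field locations matter.
    spec := StorableDashboardDataV2{
        Display: &common.Display{
            Name:        "Service Overview",   // what a v1 request calls "title"
            Description: "Latency and errors", // what a v1 request calls "description"
        },
    }
    _ = spec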

// UnmarshalAndValidateDashboardV2JSON unmarshals the JSON into a StorableDashboardDataV2
// (= PostableDashboardV2 = UpdatableDashboardV2) and validates plugin kinds and specs.
func UnmarshalAndValidateDashboardV2JSON(data []byte) (*StorableDashboardDataV2, error) {
    var d StorableDashboardDataV2
    // Note: DashboardSpec has a custom UnmarshalJSON which prevents
    // DisallowUnknownFields from working at the top level. Unknown
    // fields in plugin specs are still rejected by validateAndNormalizePluginSpec.
    if err := json.Unmarshal(data, &d); err != nil {
        return nil, err
    }
    if err := validateDashboardV2(d); err != nil {
        return nil, err
    }
    return &d, nil
}
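
Usage for orientation (not part of the diff); per TestValidateEmptySpec further down, an empty object is a valid dashboard:

    spec, err := UnmarshalAndValidateDashboardV2JSON([]byte(`{}`))
    if err != nil {
        // invalid JSON, or a plugin kind/spec failed validation
    }
    _ = spec // *StorableDashboardDataV2 with normalized plugin specs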

// Plugin kind → spec type factory. Each value is a pointer to the zero value of the
// expected spec struct. validateAndNormalizePluginSpec marshals plugin.Spec back to JSON and
// unmarshals into the typed struct to catch field-level errors.
var (
    panelPluginSpecs = map[PanelPluginKind]func() any{
        PanelKindTimeSeries: func() any { return new(TimeSeriesPanelSpec) },
        PanelKindBarChart:   func() any { return new(BarChartPanelSpec) },
        PanelKindNumber:     func() any { return new(NumberPanelSpec) },
        PanelKindPieChart:   func() any { return new(PieChartPanelSpec) },
        PanelKindTable:      func() any { return new(TablePanelSpec) },
        PanelKindHistogram:  func() any { return new(HistogramPanelSpec) },
        PanelKindList:       func() any { return new(ListPanelSpec) },
    }
    queryPluginSpecs = map[QueryPluginKind]func() any{
        QueryKindBuilder:       func() any { return new(BuilderQuerySpec) },
        QueryKindComposite:     func() any { return new(CompositeQuerySpec) },
        QueryKindFormula:       func() any { return new(FormulaSpec) },
        QueryKindPromQL:        func() any { return new(PromQLQuerySpec) },
        QueryKindClickHouseSQL: func() any { return new(ClickHouseSQLQuerySpec) },
        QueryKindTraceOperator: func() any { return new(TraceOperatorSpec) },
    }
    variablePluginSpecs = map[VariablePluginKind]func() any{
        VariableKindDynamic: func() any { return new(DynamicVariableSpec) },
        VariableKindQuery:   func() any { return new(QueryVariableSpec) },
        VariableKindCustom:  func() any { return new(CustomVariableSpec) },
        VariableKindTextbox: func() any { return new(TextboxVariableSpec) },
    }
    datasourcePluginSpecs = map[DatasourcePluginKind]func() any{
        DatasourceKindSigNoz: func() any { return new(struct{}) },
    }

    // allowedQueryKinds maps each panel plugin kind to the query plugin
    // kinds it supports. Composite sub-query types are mapped to these
    // same kind strings via compositeSubQueryTypeToPluginKind.
    allowedQueryKinds = map[PanelPluginKind][]QueryPluginKind{
        PanelKindTimeSeries: {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindPromQL, QueryKindClickHouseSQL},
        PanelKindBarChart:   {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindPromQL, QueryKindClickHouseSQL},
        PanelKindNumber:     {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindPromQL, QueryKindClickHouseSQL},
        PanelKindHistogram:  {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindPromQL, QueryKindClickHouseSQL},
        PanelKindPieChart:   {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindClickHouseSQL},
        PanelKindTable:      {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindClickHouseSQL},
        PanelKindList:       {QueryKindBuilder},
    }

    // compositeSubQueryTypeToPluginKind maps CompositeQuery sub-query type
    // strings to the equivalent top-level query plugin kind for validation.
    compositeSubQueryTypeToPluginKind = map[qb.QueryType]QueryPluginKind{
        qb.QueryTypeBuilder:       QueryKindBuilder,
        qb.QueryTypeFormula:       QueryKindFormula,
        qb.QueryTypeTraceOperator: QueryKindTraceOperator,
        qb.QueryTypePromQL:        QueryKindPromQL,
        qb.QueryTypeClickHouseSQL: QueryKindClickHouseSQL,
    }
)

func validateDashboardV2(d StorableDashboardDataV2) error {
    for name, ds := range d.Datasources {
        if err := validateDatasourcePlugin(&ds.Plugin, fmt.Sprintf("spec.datasources.%s.plugin", name)); err != nil {
            return err
        }
    }

    for i, v := range d.Variables {
        if err := validateVariablePlugin(v, fmt.Sprintf("spec.variables[%d]", i)); err != nil {
            return err
        }
    }

    for key, panel := range d.Panels {
        if panel == nil {
            return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "spec.panels.%s: panel must not be null", key)
        }
        path := fmt.Sprintf("spec.panels.%s", key)
        if err := validatePanelPlugin(&panel.Spec.Plugin, path+".spec.plugin"); err != nil {
            return err
        }
        panelKind := PanelPluginKind(panel.Spec.Plugin.Kind)
        allowed := allowedQueryKinds[panelKind]
        for qi := range panel.Spec.Queries {
            queryPath := fmt.Sprintf("%s.spec.queries[%d].spec.plugin", path, qi)
            if err := validateQueryPlugin(&panel.Spec.Queries[qi].Spec.Plugin, queryPath); err != nil {
                return err
            }
            if err := validateQueryAllowedForPanel(panel.Spec.Queries[qi].Spec.Plugin, allowed, panelKind, queryPath); err != nil {
                return err
            }
        }
    }

    return nil
}

func validateDatasourcePlugin(plugin *common.Plugin, path string) error {
    kind := DatasourcePluginKind(plugin.Kind)
    factory, ok := datasourcePluginSpecs[kind]
    if !ok {
        return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput,
            "%s: unknown datasource plugin kind %q; allowed values: %s", path, kind, formatEnum(kind.Enum()))
    }
    return validateAndNormalizePluginSpec(plugin, factory, path)
}

func validateVariablePlugin(v dashboard.Variable, path string) error {
    switch spec := v.Spec.(type) {
    case *dashboard.ListVariableSpec:
        pluginPath := path + ".spec.plugin"
        kind := VariablePluginKind(spec.Plugin.Kind)
        factory, ok := variablePluginSpecs[kind]
        if !ok {
            return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput,
                "%s: unknown variable plugin kind %q; allowed values: %s", pluginPath, kind, formatEnum(kind.Enum()))
        }
        return validateAndNormalizePluginSpec(&spec.Plugin, factory, pluginPath)
    case *dashboard.TextVariableSpec:
        // TextVariables have no plugin, nothing to validate.
        return nil
    default:
        return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "%s: unsupported variable kind %q", path, v.Kind)
    }
}

func validatePanelPlugin(plugin *common.Plugin, path string) error {
    kind := PanelPluginKind(plugin.Kind)
    factory, ok := panelPluginSpecs[kind]
    if !ok {
        return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput,
            "%s: unknown panel plugin kind %q; allowed values: %s", path, kind, formatEnum(kind.Enum()))
    }
    return validateAndNormalizePluginSpec(plugin, factory, path)
}

func validateQueryPlugin(plugin *common.Plugin, path string) error {
    kind := QueryPluginKind(plugin.Kind)
    factory, ok := queryPluginSpecs[kind]
    if !ok {
        return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput,
            "%s: unknown query plugin kind %q; allowed values: %s", path, kind, formatEnum(kind.Enum()))
    }
    return validateAndNormalizePluginSpec(plugin, factory, path)
}

func formatEnum(values []any) string {
    parts := make([]string, len(values))
    for i, v := range values {
        parts[i] = fmt.Sprintf("`%v`", v)
    }
    return strings.Join(parts, ", ")
}

// validateAndNormalizePluginSpec validates the plugin spec and writes the typed
// struct (with defaults) back into plugin.Spec so that DB storage and API
// responses contain normalized values.
func validateAndNormalizePluginSpec(plugin *common.Plugin, factory func() any, path string) error {
    if plugin.Kind == "" {
        return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "%s: plugin kind is required", path)
    }
    if plugin.Spec == nil {
        return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "%s: plugin spec is required", path)
    }
    // Re-marshal the spec and unmarshal into the typed struct.
    specJSON, err := json.Marshal(plugin.Spec)
    if err != nil {
        return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "%s.spec", path)
    }
    target := factory()
    decoder := json.NewDecoder(bytes.NewReader(specJSON))
    decoder.DisallowUnknownFields()
    if err := decoder.Decode(target); err != nil {
        return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "%s.spec", path)
    }
    if err := validator.New().Struct(target); err != nil {
        return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "%s.spec", path)
    }
    // Write the typed struct back so defaults are included.
    plugin.Spec = target
    return nil
}
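
A sketch of the normalization effect (not part of the diff): the wire-shaped map in plugin.Spec is swapped for the typed struct. The kind string is taken from a comment elsewhere in this PR; whether an empty spec passes depends on TimeSeriesPanelSpec's validator tags:

    plugin := common.Plugin{
        Kind: "signoz/TimeSeriesPanel", // assumed kind string
        Spec: map[string]any{},         // hypothetical wire payload
    }
    err := validateAndNormalizePluginSpec(&plugin, panelPluginSpecs[PanelKindTimeSeries], "spec.panels.p1.spec.plugin")
    if err == nil {
        // plugin.Spec is now a *TimeSeriesPanelSpec with defaults filled in, not a map.
    }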

// validateQueryAllowedForPanel checks that the query plugin kind is permitted
// for the given panel. For composite queries it recurses into sub-queries.
func validateQueryAllowedForPanel(plugin common.Plugin, allowed []QueryPluginKind, panelKind PanelPluginKind, path string) error {
    queryKind := QueryPluginKind(plugin.Kind)
    if !slices.Contains(allowed, queryKind) {
        return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput,
            "%s: query kind %q is not supported by panel kind %q", path, queryKind, panelKind)
    }

    // For composite queries, validate each sub-query type.
    if queryKind == QueryKindComposite && plugin.Spec != nil {
        specJSON, err := json.Marshal(plugin.Spec)
        if err != nil {
            return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "%s.spec", path)
        }
        var composite struct {
            Queries []struct {
                Type qb.QueryType `json:"type"`
            } `json:"queries"`
        }
        if err := json.Unmarshal(specJSON, &composite); err != nil {
            return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "%s.spec", path)
        }
        for si, sub := range composite.Queries {
            pluginKind, ok := compositeSubQueryTypeToPluginKind[sub.Type]
            if !ok {
                continue
            }
            if !slices.Contains(allowed, pluginKind) {
                return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput,
                    "%s.spec.queries[%d]: sub-query type %q is not supported by panel kind %q",
                    path, si, sub.Type, panelKind)
            }
        }
    }
    return nil
}
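
The compatibility rule in isolation, as a sketch (not in the diff): list panels admit only builder queries, so a PromQL query fails the slices.Contains check:

    allowed := allowedQueryKinds[PanelKindList] // {QueryKindBuilder}
    err := validateQueryAllowedForPanel(
        common.Plugin{Kind: string(QueryKindPromQL)},
        allowed,
        PanelKindList,
        "spec.panels.p1.spec.queries[0].spec.plugin",
    )
    // err != nil: query kind is not supported by panel kind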
@@ -1,4 +1,4 @@
package dashboardtypesv2
package dashboardtypes

import (
    "encoding/json"
@@ -11,37 +11,29 @@ import (
    "github.com/stretchr/testify/require"
)

func unmarshalDashboard(data []byte) (*DashboardData, error) {
    var d DashboardData
    if err := json.Unmarshal(data, &d); err != nil {
        return nil, err
    }
    return &d, nil
}

func TestValidateBigExample(t *testing.T) {
    data, err := os.ReadFile("testdata/perses.json")
    require.NoError(t, err, "reading example file")
    _, err = unmarshalDashboard(data)
    _, err = UnmarshalAndValidateDashboardV2JSON(data)
    require.NoError(t, err, "expected valid dashboard")
}

func TestValidateDashboardWithSections(t *testing.T) {
    data, err := os.ReadFile("testdata/perses_with_sections.json")
    require.NoError(t, err, "reading example file")
    _, err = unmarshalDashboard(data)
    _, err = UnmarshalAndValidateDashboardV2JSON(data)
    require.NoError(t, err, "expected valid dashboard")
}

func TestInvalidateNotAJSON(t *testing.T) {
    _, err := unmarshalDashboard([]byte("not json"))
    _, err := UnmarshalAndValidateDashboardV2JSON([]byte("not json"))
    require.Error(t, err, "expected error for invalid JSON")
}

func TestValidateEmptySpec(t *testing.T) {
    // no variables, no panels
    data := []byte(`{}`)
    _, err := unmarshalDashboard(data)
    _, err := UnmarshalAndValidateDashboardV2JSON(data)
    require.NoError(t, err, "expected valid")
}

@@ -67,13 +59,17 @@ func TestValidateOnlyVariables(t *testing.T) {
                "kind": "TextVariable",
                "spec": {
                    "name": "mytext",
                    "value": "default"
                    "value": "default",
                    "plugin": {
                        "kind": "signoz/TextboxVariable",
                        "spec": {}
                    }
                }
            }
        ],
        "layouts": []
    }`)
    _, err := unmarshalDashboard(data)
    _, err := UnmarshalAndValidateDashboardV2JSON(data)
    require.NoError(t, err, "expected valid")
}

@@ -152,7 +148,7 @@ func TestInvalidateUnknownPluginKind(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            _, err := unmarshalDashboard([]byte(tt.data))
            _, err := UnmarshalAndValidateDashboardV2JSON([]byte(tt.data))
            require.Error(t, err, "expected error containing %q, got nil", tt.wantContain)
            require.Contains(t, err.Error(), tt.wantContain, "error should mention %q", tt.wantContain)
        })
@@ -173,7 +169,7 @@ func TestInvalidateOneInvalidPanel(t *testing.T) {
        },
        "layouts": []
    }`)
    _, err := unmarshalDashboard(data)
    _, err := UnmarshalAndValidateDashboardV2JSON(data)
    require.Error(t, err, "expected error for invalid panel plugin kind")
    require.Contains(t, err.Error(), "FakePanel", "error should mention FakePanel")
}
@@ -249,7 +245,7 @@ func TestRejectUnknownFieldsInPluginSpec(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            _, err := unmarshalDashboard([]byte(tt.data))
            _, err := UnmarshalAndValidateDashboardV2JSON([]byte(tt.data))
            require.Error(t, err, "expected error for unknown field")
            require.Contains(t, err.Error(), tt.wantContain, "error should mention %q", tt.wantContain)
        })
@@ -327,7 +323,7 @@ func TestInvalidateWrongFieldTypeInPluginSpec(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            _, err := unmarshalDashboard([]byte(tt.data))
            _, err := UnmarshalAndValidateDashboardV2JSON([]byte(tt.data))
            require.Error(t, err, "expected validation error")
            if tt.wantContain != "" {
                require.Contains(t, err.Error(), tt.wantContain, "error should mention %q", tt.wantContain)
@@ -535,7 +531,7 @@ func TestInvalidateBadPanelSpecValues(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            _, err := unmarshalDashboard([]byte(tt.data))
            _, err := UnmarshalAndValidateDashboardV2JSON([]byte(tt.data))
            require.Error(t, err, "expected error containing %q, got nil", tt.wantContain)
            require.Contains(t, err.Error(), tt.wantContain, "error should mention %q", tt.wantContain)
        })
@@ -630,7 +626,7 @@ func TestValidateRequiredFields(t *testing.T) {

    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            _, err := unmarshalDashboard([]byte(tt.data))
            _, err := UnmarshalAndValidateDashboardV2JSON([]byte(tt.data))
            require.Error(t, err, "expected error containing %q, got nil", tt.wantContain)
            require.Contains(t, err.Error(), tt.wantContain, "error should mention %q", tt.wantContain)
        })
@@ -652,7 +648,7 @@ func TestTimeSeriesPanelDefaults(t *testing.T) {
        },
        "layouts": []
    }`)
    d, err := unmarshalDashboard(data)
    d, err := UnmarshalAndValidateDashboardV2JSON(data)
    require.NoError(t, err, "unmarshal and validate failed")

    // After validation+normalization, the plugin spec should be a typed struct.
@@ -699,7 +695,7 @@ func TestNumberPanelDefaults(t *testing.T) {
        },
        "layouts": []
    }`)
    d, err := unmarshalDashboard(data)
    d, err := UnmarshalAndValidateDashboardV2JSON(data)
    require.NoError(t, err, "unmarshal and validate failed")

    require.IsType(t, &NumberPanelSpec{}, d.Panels["p1"].Spec.Plugin.Spec)
@@ -720,30 +716,6 @@ func TestNumberPanelDefaults(t *testing.T) {
        "expected stored/response JSON to contain operator:>, got: %s", outputStr)
}

// TestPersesFixtureStorageRoundTrip exercises the typed → map[string]any →
// typed cycle that the create/get path performs against the kitchen-sink
// fixture. Catches plugin specs whose UnmarshalJSON expects a different shape
// than the default MarshalJSON emits.
func TestPersesFixtureStorageRoundTrip(t *testing.T) {
    raw, err := os.ReadFile("testdata/perses.json")
    require.NoError(t, err)

    var data DashboardData
    require.NoError(t, json.Unmarshal(raw, &data), "initial unmarshal")

    marshaled, err := json.Marshal(data)
    require.NoError(t, err, "marshal typed → JSON")

    var asMap map[string]any
    require.NoError(t, json.Unmarshal(marshaled, &asMap), "JSON → map (storage shape)")

    remarshaled, err := json.Marshal(asMap)
    require.NoError(t, err, "map → JSON (read-back shape)")

    var roundtripped DashboardData
    require.NoError(t, json.Unmarshal(remarshaled, &roundtripped), "JSON → typed (the failure mode)")
}

// TestStorageRoundTrip simulates the future DB store/load cycle:
// marshal the normalized dashboard to JSON (what would be written to DB),
// then unmarshal it back (what would be read from DB), and verify defaults survive.
@@ -773,7 +745,7 @@ func TestStorageRoundTrip(t *testing.T) {
    }`)

    // Step 1: Unmarshal + validate + normalize (what the API handler does).
    d, err := unmarshalDashboard(input)
    d, err := UnmarshalAndValidateDashboardV2JSON(input)
    require.NoError(t, err, "unmarshal and validate failed")

    // Step 1.5: Verify struct fields have correct defaults (extra validation before storing).
@@ -793,7 +765,7 @@ func TestStorageRoundTrip(t *testing.T) {
    require.NoError(t, err, "marshal for storage failed")

    // Step 3: Unmarshal from JSON (simulates reading from DB).
    loaded, err := unmarshalDashboard(stored)
    loaded, err := UnmarshalAndValidateDashboardV2JSON(stored)
    require.NoError(t, err, "unmarshal from storage failed")

    // Step 3.5: Verify struct fields have correct defaults after loading (before returning in API).
@@ -906,7 +878,7 @@ func TestPanelTypeQueryTypeCompatibility(t *testing.T) {

    for _, tc := range cases {
        t.Run(tc.name, func(t *testing.T) {
            _, err := unmarshalDashboard(tc.data)
            _, err := UnmarshalAndValidateDashboardV2JSON(tc.data)
            if tc.wantErr {
                require.Error(t, err)
            } else {
@@ -1,218 +0,0 @@
package dashboardtypesv2

import (
    "bytes"
    "encoding/json"
    "time"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/types/dashboardtypes"
    "github.com/SigNoz/signoz/pkg/types/tagtypes"
    "github.com/SigNoz/signoz/pkg/valuer"
)

const (
    SchemaVersion       = "v6"
    MaxTagsPerDashboard = 5
)

type Dashboard struct {
    types.Identifiable
    types.TimeAuditable
    types.UserAuditable

    OrgID        valuer.UUID                     `json:"orgId"`
    Locked       bool                            `json:"locked"`
    Info         DashboardInfo                   `json:"info"`
    PublicConfig *dashboardtypes.PublicDashboard `json:"publicConfig,omitempty"`
}

// DashboardInfo is the serializable view of a dashboard's contents — what the UI renders as "the dashboard JSON".
type DashboardInfo struct {
    StoredDashboardInfo
    Tags []*tagtypes.Tag `json:"tags,omitempty"`
}

// StoredDashboardInfo is exactly what serializes into the dashboard.data column.
type StoredDashboardInfo struct {
    Metadata DashboardMetadata `json:"metadata"`
    Data     DashboardData     `json:"data"`
}

type DashboardMetadata struct {
    SchemaVersion   string `json:"schemaVersion"`
    Image           string `json:"image,omitempty"`
    UploadedGrafana bool   `json:"uploadedGrafana"`
}

type PostableDashboard struct {
    StoredDashboardInfo
    Tags []tagtypes.PostableTag `json:"tags,omitempty"`
}

// UpdateableDashboard is the request shape for PUT /api/v2/dashboards/{id}.
// Identical to PostableDashboard today; aliased so the surface reads cleanly.
type UpdateableDashboard = PostableDashboard

func (p *PostableDashboard) UnmarshalJSON(data []byte) error {
    dec := json.NewDecoder(bytes.NewReader(data))
    dec.DisallowUnknownFields()
    type alias PostableDashboard
    var tmp alias
    if err := dec.Decode(&tmp); err != nil {
        return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "%s", err.Error())
    }
    *p = PostableDashboard(tmp)
    return p.Validate()
}

func (p *PostableDashboard) Validate() error {
    if p.Metadata.SchemaVersion != SchemaVersion {
        return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "metadata.schemaVersion must be %q, got %q", SchemaVersion, p.Metadata.SchemaVersion)
    }
    if p.Data.Display == nil || p.Data.Display.Name == "" {
        return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "data.display.name is required")
    }
    if len(p.Tags) > MaxTagsPerDashboard {
        return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "a dashboard can have at most %d tags", MaxTagsPerDashboard)
    }
    return p.Data.Validate()
}
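
For orientation (not part of the diff), a minimal request body that passes Validate: the v6 schema version, a display name, and at most five tags; everything else is delegated to Data.Validate. Values are illustrative:

    var postable PostableDashboard
    err := json.Unmarshal([]byte(`{
        "metadata": {"schemaVersion": "v6", "uploadedGrafana": false},
        "data": {
            "display": {"name": "Service Overview"},
            "panels": {},
            "layouts": [],
            "duration": "1h"
        }
    }`), &postable)
    // err != nil if an unknown field sneaks in (DisallowUnknownFields) or any
    // Validate rule fails.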

type GettableDashboard struct {
    types.Identifiable
    types.TimeAuditable
    types.UserAuditable

    OrgID        valuer.UUID                               `json:"orgId"`
    Locked       bool                                      `json:"locked"`
    Info         GettableDashboardInfo                     `json:"info"`
    PublicConfig *dashboardtypes.GettablePublicDasbhboard `json:"publicConfig,omitempty"`
}

type GettableDashboardInfo struct {
    StoredDashboardInfo
    Tags []*tagtypes.GettableTag `json:"tags,omitempty"`
}

func NewGettableDashboardFromDashboard(dashboard *Dashboard) *GettableDashboard {
    gettable := &GettableDashboard{
        Identifiable:  dashboard.Identifiable,
        TimeAuditable: dashboard.TimeAuditable,
        UserAuditable: dashboard.UserAuditable,
        OrgID:         dashboard.OrgID,
        Locked:        dashboard.Locked,
        Info: GettableDashboardInfo{
            StoredDashboardInfo: dashboard.Info.StoredDashboardInfo,
            Tags:                tagtypes.NewGettableTagsFromTags(dashboard.Info.Tags),
        },
    }
    if dashboard.PublicConfig != nil {
        gettable.PublicConfig = dashboardtypes.NewGettablePublicDashboard(dashboard.PublicConfig)
    }
    return gettable
}

func NewDashboard(orgID valuer.UUID, createdBy string, postable PostableDashboard, resolvedTags []*tagtypes.Tag) *Dashboard {
    now := time.Now()

    return &Dashboard{
        Identifiable:  types.Identifiable{ID: valuer.GenerateUUID()},
        TimeAuditable: types.TimeAuditable{CreatedAt: now, UpdatedAt: now},
        UserAuditable: types.UserAuditable{CreatedBy: createdBy, UpdatedBy: createdBy},
        OrgID:         orgID,
        Locked:        false,
        Info: DashboardInfo{
            StoredDashboardInfo: StoredDashboardInfo{
                Metadata: postable.Metadata,
                Data:     postable.Data,
            },
            Tags: resolvedTags,
        },
    }
}

// NewDashboardFromStorable rejects rows that don't carry a v2-shape blob — those are pre-migration v1 dashboards that the v2 API can't render.
func NewDashboardFromStorable(storable *dashboardtypes.StorableDashboard, public *dashboardtypes.StorablePublicDashboard, tags []*tagtypes.Tag) (*Dashboard, error) {
    metadata, _ := storable.Data["metadata"].(map[string]any)
    if metadata == nil || metadata["schemaVersion"] != SchemaVersion {
        return nil, errors.Newf(errors.TypeUnsupported, dashboardtypes.ErrCodeDashboardInvalidData, "dashboard %s is not in %s schema", storable.ID, SchemaVersion)
    }

    raw, err := json.Marshal(storable.Data)
    if err != nil {
        return nil, errors.WrapInternalf(err, errors.CodeInternal, "marshal stored v2 dashboard data")
    }
    var stored StoredDashboardInfo
    if err := json.Unmarshal(raw, &stored); err != nil {
        return nil, errors.WrapInternalf(err, errors.CodeInternal, "unmarshal stored v2 dashboard data")
    }

    var publicConfig *dashboardtypes.PublicDashboard
    if public != nil {
        publicConfig = dashboardtypes.NewPublicDashboardFromStorablePublicDashboard(public)
    }

    return &Dashboard{
        Identifiable:  storable.Identifiable,
        TimeAuditable: storable.TimeAuditable,
        UserAuditable: storable.UserAuditable,
        OrgID:         storable.OrgID,
        Locked:        storable.Locked,
        Info: DashboardInfo{
            StoredDashboardInfo: stored,
            Tags:                tags,
        },
        PublicConfig: publicConfig,
    }, nil
}

func (d *Dashboard) CanUpdate() error {
    if d.Locked {
        return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "cannot update a locked dashboard, please unlock the dashboard to update")
    }
    return nil
}

func (d *Dashboard) Update(updateable UpdateableDashboard, updatedBy string, resolvedTags []*tagtypes.Tag) error {
    if err := d.CanUpdate(); err != nil {
        return err
    }
    d.Info.Metadata = updateable.Metadata
    d.Info.Data = updateable.Data
    d.Info.Tags = resolvedTags
    d.UpdatedBy = updatedBy
    d.UpdatedAt = time.Now()
    return nil
}

// ToStorableDashboard packages a Dashboard into the bun row that goes into
// the dashboard table. Tags are intentionally omitted — they live in
// tag_relations and are inserted separately by the caller.
func (d *Dashboard) ToStorableDashboard() (*dashboardtypes.StorableDashboard, error) {
    data, err := d.Info.toStorableDashboardData()
    if err != nil {
        return nil, err
    }
    return &dashboardtypes.StorableDashboard{
        Identifiable:  types.Identifiable{ID: d.ID},
        TimeAuditable: d.TimeAuditable,
        UserAuditable: d.UserAuditable,
        OrgID:         d.OrgID,
        Locked:        d.Locked,
        Data:          data,
    }, nil
}

func (s StoredDashboardInfo) toStorableDashboardData() (dashboardtypes.StorableDashboardData, error) {
    raw, err := json.Marshal(s)
    if err != nil {
        return nil, errors.WrapInternalf(err, errors.CodeInternal, "marshal v2 dashboard data")
    }
    out := dashboardtypes.StorableDashboardData{}
    if err := json.Unmarshal(raw, &out); err != nil {
        return nil, errors.WrapInternalf(err, errors.CodeInternal, "unmarshal v2 dashboard data")
    }
    return out, nil
}
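
A sketch of the create flow assembled from the functions above (not in the diff; the helper name is hypothetical and tag resolution is the caller's job):

    func createDashboard(orgID valuer.UUID, createdBy string, postable PostableDashboard, resolvedTags []*tagtypes.Tag) (*dashboardtypes.StorableDashboard, error) {
        dash := NewDashboard(orgID, createdBy, postable, resolvedTags)
        storable, err := dash.ToStorableDashboard() // tags deliberately not serialized into the row
        if err != nil {
            return nil, err
        }
        // The caller persists storable in the dashboard table and inserts
        // (entity_type, entity_id, tag_id, org_id) rows into tag_relations.
        return storable, nil
    }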
@@ -1,116 +0,0 @@
package dashboardtypesv2

import (
    "bytes"
    "encoding/json"
    "fmt"
    "slices"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/types/dashboardtypes"
    qb "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    v1 "github.com/perses/perses/pkg/model/api/v1"
    "github.com/perses/perses/pkg/model/api/v1/common"
)

// DashboardData is the SigNoz dashboard v2 spec shape. It mirrors
// v1.DashboardSpec (Perses) field-for-field, except every common.Plugin
// occurrence is replaced with a typed SigNoz plugin whose OpenAPI schema is a
// per-site discriminated oneOf.
type DashboardData struct {
    Display         *common.Display            `json:"display,omitempty"`
    Datasources     map[string]*DatasourceSpec `json:"datasources,omitempty"`
    Variables       []Variable                 `json:"variables,omitempty"`
    Panels          map[string]*Panel          `json:"panels"`
    Layouts         []Layout                   `json:"layouts"`
    Duration        common.DurationString      `json:"duration"`
    RefreshInterval common.DurationString      `json:"refreshInterval,omitempty"`
    Links           []v1.Link                  `json:"links,omitempty"`
}

// ══════════════════════════════════════════════
// Unmarshal + validate entry point
// ══════════════════════════════════════════════

func (d *DashboardData) UnmarshalJSON(data []byte) error {
    dec := json.NewDecoder(bytes.NewReader(data))
    dec.DisallowUnknownFields()
    type alias DashboardData
    var tmp alias
    if err := dec.Decode(&tmp); err != nil {
        return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "%s", err.Error())
    }
    *d = DashboardData(tmp)
    return d.Validate()
}

// ══════════════════════════════════════════════
// Cross-field validation
// ══════════════════════════════════════════════

func (d *DashboardData) Validate() error {
    for key, panel := range d.Panels {
        if panel == nil {
            return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "spec.panels.%s: panel must not be null", key)
        }
        path := fmt.Sprintf("spec.panels.%s", key)
        panelKind := panel.Spec.Plugin.Kind
        allowed := allowedQueryKinds[panelKind]
        for qi, q := range panel.Spec.Queries {
            queryPath := fmt.Sprintf("%s.spec.queries[%d].spec.plugin", path, qi)
            if err := validateQueryAllowedForPanel(q.Spec.Plugin, allowed, panelKind, queryPath); err != nil {
                return err
            }
        }
    }
    return nil
}

func validateQueryAllowedForPanel(plugin QueryPlugin, allowed []QueryPluginKind, panelKind PanelPluginKind, path string) error {
    if !slices.Contains(allowed, plugin.Kind) {
        return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput,
            "%s: query kind %q is not supported by panel kind %q", path, plugin.Kind, panelKind)
    }

    if plugin.Kind != QueryKindComposite {
        return nil
    }
    composite, ok := plugin.Spec.(*CompositeQuerySpec)
    if !ok || composite == nil {
        return nil
    }
    specJSON, err := json.Marshal(composite)
    if err != nil {
        return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "%s.spec", path)
    }
    var subs struct {
        Queries []struct {
            Type qb.QueryType `json:"type"`
        } `json:"queries"`
    }
    if err := json.Unmarshal(specJSON, &subs); err != nil {
        return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "%s.spec", path)
    }
    for si, sub := range subs.Queries {
        subKind, ok := compositeSubQueryTypeToPluginKind[sub.Type]
        if !ok {
            continue
        }
        if !slices.Contains(allowed, subKind) {
            return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput,
                "%s.spec.queries[%d]: sub-query type %q is not supported by panel kind %q",
                path, si, sub.Type, panelKind)
        }
    }
    return nil
}

var (
    compositeSubQueryTypeToPluginKind = map[qb.QueryType]QueryPluginKind{
        qb.QueryTypeBuilder:       QueryKindBuilder,
        qb.QueryTypeFormula:       QueryKindFormula,
        qb.QueryTypeTraceOperator: QueryKindTraceOperator,
        qb.QueryTypePromQL:        QueryKindPromQL,
        qb.QueryTypeClickHouseSQL: QueryKindClickHouseSQL,
    }
)
@@ -1,171 +0,0 @@
package dashboardtypesv2

// TestDashboardDataMatchesPerses asserts that DashboardData
// and every nested SigNoz-owned type cover the JSON field set of their Perses
// counterpart.

import (
    "reflect"
    "sort"
    "strings"
    "testing"

    v1 "github.com/perses/perses/pkg/model/api/v1"
    "github.com/perses/perses/pkg/model/api/v1/dashboard"

    "github.com/stretchr/testify/assert"
)

func TestDashboardDataMatchesPerses(t *testing.T) {
    cases := []struct {
        name   string
        ours   reflect.Type
        perses reflect.Type
    }{
        {"DashboardSpec", typeOf[DashboardData](), typeOf[v1.DashboardSpec]()},
        {"Panel", typeOf[Panel](), typeOf[v1.Panel]()},
        {"PanelSpec", typeOf[PanelSpec](), typeOf[v1.PanelSpec]()},
        {"Query", typeOf[Query](), typeOf[v1.Query]()},
        {"QuerySpec", typeOf[QuerySpec](), typeOf[v1.QuerySpec]()},
        {"DatasourceSpec", typeOf[DatasourceSpec](), typeOf[v1.DatasourceSpec]()},
        {"Variable", typeOf[Variable](), typeOf[dashboard.Variable]()},
        {"ListVariableSpec", typeOf[ListVariableSpec](), typeOf[dashboard.ListVariableSpec]()},
        {"TextVariableSpec", typeOf[TextVariableSpec](), typeOf[dashboard.TextVariableSpec]()},
        {"Layout", typeOf[Layout](), typeOf[dashboard.Layout]()},
    }

    for _, c := range cases {
        t.Run(c.name, func(t *testing.T) {
            missing, extra := drift(c.ours, c.perses)

            assert.Empty(t, missing,
                "DashboardData (%s) is missing json fields present on Perses %s — upstream likely added or renamed a field",
                c.ours.Name(), c.perses.Name())
            assert.Empty(t, extra,
                "DashboardData (%s) has json fields absent on Perses %s — upstream likely removed a field or we added one without the counterpart",
                c.ours.Name(), c.perses.Name())
        })
    }
}

func TestDriftDetectionMechanics(t *testing.T) {
    t.Run("upstream added a field", func(t *testing.T) {
        type ours struct {
            Name string `json:"name"`
        }
        type perses struct {
            Name        string `json:"name"`
            Description string `json:"description"`
        }
        missing, extra := drift(typeOf[ours](), typeOf[perses]())
        assert.Equal(t, []string{"description"}, missing, "missing fires: upstream has a field we don't")
        assert.Empty(t, extra)
    })

    t.Run("upstream removed a field", func(t *testing.T) {
        type ours struct {
            Name        string `json:"name"`
            Description string `json:"description"`
        }
        type perses struct {
            Name string `json:"name"`
        }
        missing, extra := drift(typeOf[ours](), typeOf[perses]())
        assert.Empty(t, missing)
        assert.Equal(t, []string{"description"}, extra, "extra fires: we kept a field upstream removed")
    })

    t.Run("upstream renamed a field", func(t *testing.T) {
        type ours struct {
            Name string `json:"name"`
        }
        type perses struct {
            Name string `json:"title"`
        }
        missing, extra := drift(typeOf[ours](), typeOf[perses]())
        assert.Equal(t, []string{"title"}, missing, "missing fires for the new name")
        assert.Equal(t, []string{"name"}, extra, "extra fires for the old name — both fire on a rename")
    })

    t.Run("we added a field upstream does not have", func(t *testing.T) {
        type ours struct {
            Name     string `json:"name"`
            Internal string `json:"internal"`
        }
        type perses struct {
            Name string `json:"name"`
        }
        missing, extra := drift(typeOf[ours](), typeOf[perses]())
        assert.Empty(t, missing)
        assert.Equal(t, []string{"internal"}, extra, "extra fires: we added a field with no upstream counterpart")
    })

    t.Run("embedded struct flattens — drift inside the embed is caught", func(t *testing.T) {
        type embedded struct {
            Display string `json:"display"`
            NewBit  string `json:"newBit"` // upstream added this inside the embed
        }
        type ours struct {
            Display string `json:"display"`
            Name    string `json:"name"`
        }
        type perses struct {
            embedded `json:",inline"`
            Name     string `json:"name"`
        }
        missing, extra := drift(typeOf[ours](), typeOf[perses]())
        assert.Equal(t, []string{"newBit"}, missing, "field added inside an inlined embed surfaces at the parent level")
        assert.Empty(t, extra)
    })
}

func drift(ours, perses reflect.Type) (missing, extra []string) {
    o, p := jsonFields(ours), jsonFields(perses)
    return sortedDiff(p, o), sortedDiff(o, p)
}

// jsonFields returns the set of json tag names for a struct, flattening
// anonymous embedded fields (matching encoding/json behavior).
func jsonFields(t reflect.Type) map[string]struct{} {
    out := map[string]struct{}{}
    if t.Kind() != reflect.Struct {
        return out
    }
    for i := 0; i < t.NumField(); i++ {
        f := t.Field(i)
        // Skip unexported fields (e.g., dashboard.TextVariableSpec has an
        // unexported `variableSpec` interface tag).
        if !f.IsExported() && !f.Anonymous {
            continue
        }
        tag := f.Tag.Get("json")
        name := strings.Split(tag, ",")[0]
        // Anonymous embed with empty json name (no tag, or `json:",inline"` /
        // `json:",omitempty"`-style options-only tag) is flattened by encoding/json.
        if f.Anonymous && name == "" {
            for k := range jsonFields(f.Type) {
                out[k] = struct{}{}
            }
            continue
        }
        if tag == "-" || name == "" {
            continue
        }
        out[name] = struct{}{}
    }
    return out
}

// sortedDiff returns keys in a but not in b, sorted.
func sortedDiff(a, b map[string]struct{}) []string {
    var diff []string
    for k := range a {
        if _, ok := b[k]; !ok {
            diff = append(diff, k)
        }
    }
    sort.Strings(diff)
    return diff
}

func typeOf[T any]() reflect.Type { return reflect.TypeOf((*T)(nil)).Elem() }
@@ -1,301 +0,0 @@
package dashboardtypesv2

import (
    "bytes"
    "encoding/json"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/types/dashboardtypes"
    "github.com/go-playground/validator/v10"
    "github.com/swaggest/jsonschema-go"
)

// ══════════════════════════════════════════════
// Panel plugin
// ══════════════════════════════════════════════

type PanelPlugin struct {
    Kind PanelPluginKind `json:"kind"`
    Spec any             `json:"spec"`
}

// PrepareJSONSchema drops the reflected struct shape (type: object, properties)
// from the envelope so that only the JSONSchemaOneOf result binds.
func (PanelPlugin) PrepareJSONSchema(s *jsonschema.Schema) error {
    return clearOneOfParentShape(s)
}

func (p *PanelPlugin) UnmarshalJSON(data []byte) error {
    kind, specJSON, err := extractKindAndSpec(data)
    if err != nil {
        return err
    }
    factory, ok := panelPluginSpecs[PanelPluginKind(kind)]
    if !ok {
        return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "unknown panel plugin kind %q", kind)
    }
    spec, err := decodeSpec(specJSON, factory(), kind)
    if err != nil {
        return err
    }
    p.Kind = PanelPluginKind(kind)
    p.Spec = spec
    return nil
}

func (PanelPlugin) JSONSchemaOneOf() []any {
    return []any{
        PanelPluginVariant[TimeSeriesPanelSpec]{Kind: string(PanelKindTimeSeries)},
        PanelPluginVariant[BarChartPanelSpec]{Kind: string(PanelKindBarChart)},
        PanelPluginVariant[NumberPanelSpec]{Kind: string(PanelKindNumber)},
        PanelPluginVariant[PieChartPanelSpec]{Kind: string(PanelKindPieChart)},
        PanelPluginVariant[TablePanelSpec]{Kind: string(PanelKindTable)},
        PanelPluginVariant[HistogramPanelSpec]{Kind: string(PanelKindHistogram)},
        PanelPluginVariant[ListPanelSpec]{Kind: string(PanelKindList)},
    }
}

type PanelPluginVariant[S any] struct {
    Kind string `json:"kind" required:"true"`
    Spec S      `json:"spec" required:"true"`
}

func (v PanelPluginVariant[S]) PrepareJSONSchema(s *jsonschema.Schema) error {
    return restrictKindToOneValue(s, v.Kind)
}

// ══════════════════════════════════════════════
// Query plugin
// ══════════════════════════════════════════════

type QueryPlugin struct {
    Kind QueryPluginKind `json:"kind"`
    Spec any             `json:"spec"`
}

func (QueryPlugin) PrepareJSONSchema(s *jsonschema.Schema) error {
    return clearOneOfParentShape(s)
}

func (p *QueryPlugin) UnmarshalJSON(data []byte) error {
    kind, specJSON, err := extractKindAndSpec(data)
    if err != nil {
        return err
    }
    factory, ok := queryPluginSpecs[QueryPluginKind(kind)]
    if !ok {
        return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "unknown query plugin kind %q", kind)
    }
    spec, err := decodeSpec(specJSON, factory(), kind)
    if err != nil {
        return err
    }
    p.Kind = QueryPluginKind(kind)
    p.Spec = spec
    return nil
}

func (QueryPlugin) JSONSchemaOneOf() []any {
    return []any{
        QueryPluginVariant[BuilderQuerySpec]{Kind: string(QueryKindBuilder)},
        QueryPluginVariant[CompositeQuerySpec]{Kind: string(QueryKindComposite)},
        QueryPluginVariant[FormulaSpec]{Kind: string(QueryKindFormula)},
        QueryPluginVariant[PromQLQuerySpec]{Kind: string(QueryKindPromQL)},
        QueryPluginVariant[ClickHouseSQLQuerySpec]{Kind: string(QueryKindClickHouseSQL)},
        QueryPluginVariant[TraceOperatorSpec]{Kind: string(QueryKindTraceOperator)},
    }
}

type QueryPluginVariant[S any] struct {
    Kind string `json:"kind" required:"true"`
    Spec S      `json:"spec" required:"true"`
}

func (v QueryPluginVariant[S]) PrepareJSONSchema(s *jsonschema.Schema) error {
    return restrictKindToOneValue(s, v.Kind)
}

// ══════════════════════════════════════════════
// Variable plugin
// ══════════════════════════════════════════════

type VariablePlugin struct {
    Kind VariablePluginKind `json:"kind"`
    Spec any                `json:"spec"`
}

func (VariablePlugin) PrepareJSONSchema(s *jsonschema.Schema) error {
    return clearOneOfParentShape(s)
}

func (p *VariablePlugin) UnmarshalJSON(data []byte) error {
    kind, specJSON, err := extractKindAndSpec(data)
    if err != nil {
        return err
    }
    factory, ok := variablePluginSpecs[VariablePluginKind(kind)]
    if !ok {
        return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "unknown variable plugin kind %q", kind)
    }
    spec, err := decodeSpec(specJSON, factory(), kind)
    if err != nil {
        return err
    }
    p.Kind = VariablePluginKind(kind)
    p.Spec = spec
    return nil
}

func (VariablePlugin) JSONSchemaOneOf() []any {
    return []any{
        VariablePluginVariant[DynamicVariableSpec]{Kind: string(VariableKindDynamic)},
        VariablePluginVariant[QueryVariableSpec]{Kind: string(VariableKindQuery)},
        VariablePluginVariant[CustomVariableSpec]{Kind: string(VariableKindCustom)},
    }
}

type VariablePluginVariant[S any] struct {
    Kind string `json:"kind" required:"true"`
    Spec S      `json:"spec" required:"true"`
}

func (v VariablePluginVariant[S]) PrepareJSONSchema(s *jsonschema.Schema) error {
    return restrictKindToOneValue(s, v.Kind)
}

// ══════════════════════════════════════════════
// Datasource plugin
// ══════════════════════════════════════════════

type DatasourcePlugin struct {
    Kind DatasourcePluginKind `json:"kind"`
    Spec any                  `json:"spec"`
}

func (DatasourcePlugin) PrepareJSONSchema(s *jsonschema.Schema) error {
    return clearOneOfParentShape(s)
}

func (p *DatasourcePlugin) UnmarshalJSON(data []byte) error {
    kind, specJSON, err := extractKindAndSpec(data)
    if err != nil {
        return err
    }
    factory, ok := datasourcePluginSpecs[DatasourcePluginKind(kind)]
    if !ok {
        return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "unknown datasource plugin kind %q", kind)
    }
    spec, err := decodeSpec(specJSON, factory(), kind)
    if err != nil {
        return err
    }
    p.Kind = DatasourcePluginKind(kind)
    p.Spec = spec
    return nil
}

func (DatasourcePlugin) JSONSchemaOneOf() []any {
    return []any{
        DatasourcePluginVariant[struct{}]{Kind: string(DatasourceKindSigNoz)},
    }
}

type DatasourcePluginVariant[S any] struct {
    Kind string `json:"kind" required:"true"`
    Spec S      `json:"spec" required:"true"`
}

func (v DatasourcePluginVariant[S]) PrepareJSONSchema(s *jsonschema.Schema) error {
    return restrictKindToOneValue(s, v.Kind)
}

// ══════════════════════════════════════════════
// Helpers
// ══════════════════════════════════════════════

var (
    panelPluginSpecs = map[PanelPluginKind]func() any{
        PanelKindTimeSeries: func() any { return new(TimeSeriesPanelSpec) },
        PanelKindBarChart:   func() any { return new(BarChartPanelSpec) },
        PanelKindNumber:     func() any { return new(NumberPanelSpec) },
        PanelKindPieChart:   func() any { return new(PieChartPanelSpec) },
        PanelKindTable:      func() any { return new(TablePanelSpec) },
        PanelKindHistogram:  func() any { return new(HistogramPanelSpec) },
        PanelKindList:       func() any { return new(ListPanelSpec) },
    }
    queryPluginSpecs = map[QueryPluginKind]func() any{
        QueryKindBuilder:       func() any { return new(BuilderQuerySpec) },
        QueryKindComposite:     func() any { return new(CompositeQuerySpec) },
        QueryKindFormula:       func() any { return new(FormulaSpec) },
        QueryKindPromQL:        func() any { return new(PromQLQuerySpec) },
        QueryKindClickHouseSQL: func() any { return new(ClickHouseSQLQuerySpec) },
        QueryKindTraceOperator: func() any { return new(TraceOperatorSpec) },
    }
    variablePluginSpecs = map[VariablePluginKind]func() any{
        VariableKindDynamic: func() any { return new(DynamicVariableSpec) },
        VariableKindQuery:   func() any { return new(QueryVariableSpec) },
        VariableKindCustom:  func() any { return new(CustomVariableSpec) },
    }
    datasourcePluginSpecs = map[DatasourcePluginKind]func() any{
        DatasourceKindSigNoz: func() any { return new(struct{}) },
    }

    allowedQueryKinds = map[PanelPluginKind][]QueryPluginKind{
        PanelKindTimeSeries: {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindPromQL, QueryKindClickHouseSQL},
        PanelKindBarChart:   {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindPromQL, QueryKindClickHouseSQL},
        PanelKindNumber:     {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindPromQL, QueryKindClickHouseSQL},
        PanelKindHistogram:  {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindPromQL, QueryKindClickHouseSQL},
        PanelKindPieChart:   {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindClickHouseSQL},
        PanelKindTable:      {QueryKindBuilder, QueryKindComposite, QueryKindFormula, QueryKindTraceOperator, QueryKindClickHouseSQL},
        PanelKindList:       {QueryKindBuilder},
    }
)

// extractKindAndSpec parses a {"kind": "...", "spec": {...}} envelope and returns
// kind and the raw spec bytes for typed decoding.
func extractKindAndSpec(data []byte) (string, []byte, error) {
    var head struct {
        Kind string          `json:"kind"`
        Spec json.RawMessage `json:"spec"`
    }
    if err := json.Unmarshal(data, &head); err != nil {
        return "", nil, err
    }
    return head.Kind, head.Spec, nil
}

// decodeSpec strict-decodes a spec JSON into target and runs struct-tag validation (go-playground/validator).
func decodeSpec(specJSON []byte, target any, kind string) (any, error) {
    if len(specJSON) == 0 {
        return nil, errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "kind %q: spec is required", kind)
    }
    dec := json.NewDecoder(bytes.NewReader(specJSON))
    dec.DisallowUnknownFields()
    if err := dec.Decode(target); err != nil {
        return nil, errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "kind %q: invalid spec JSON", kind)
    }
    if err := validator.New().Struct(target); err != nil {
        return nil, errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "kind %q: spec failed validation", kind)
    }
    return target, nil
}
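
For illustration (not in the diff), the two helpers compose into the decode path every plugin UnmarshalJSON above follows; the wrapper name is hypothetical:

    func decodeTextVariableSpec(raw []byte) (*TextVariableSpec, error) {
        // e.g. raw = {"kind":"TextVariable","spec":{"name":"mytext","value":"default"}}
        kind, specJSON, err := extractKindAndSpec(raw)
        if err != nil {
            return nil, err
        }
        spec, err := decodeSpec(specJSON, new(TextVariableSpec), kind) // strict decode + struct-tag validation
        if err != nil {
            return nil, err
        }
        return spec.(*TextVariableSpec), nil
    }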

// clearOneOfParentShape drops Type and Properties on a schema that also has a JSONSchemaOneOf.
func clearOneOfParentShape(s *jsonschema.Schema) error {
    s.Type = nil
    s.Properties = nil
    return nil
}

// restrictKindToOneValue ensures that the schema only allows one Kind value for a type.
// For example, PanelPluginVariant[TimeSeriesPanelSpec]{Kind: string(PanelKindTimeSeries)} should
// only allow "signoz/TimeSeriesPanel" in its kind field.
func restrictKindToOneValue(schema *jsonschema.Schema, kind string) error {
    kindProp, ok := schema.Properties["kind"]
    if !ok || kindProp.TypeObject == nil {
        return errors.NewInternalf(errors.CodeInternal, "variant schema missing `kind` property")
    }
    kindProp.TypeObject.WithEnum(kind)
    schema.Properties["kind"] = kindProp
    return nil
}
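
A hedged sketch of how these hooks fire (not in the diff), assuming the swaggest/jsonschema-go Reflector API: reflection walks JSONSchemaOneOf and calls PrepareJSONSchema on each type, so the envelope loses its reflected struct shape and every variant pins "kind" to a single enum value:

    func panelPluginSchema() (jsonschema.Schema, error) {
        reflector := jsonschema.Reflector{}
        // The result is a discriminated oneOf: no top-level type/properties
        // (clearOneOfParentShape) and one enum value per variant "kind"
        // (restrictKindToOneValue).
        return reflector.Reflect(PanelPlugin{})
    }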
@@ -1,189 +0,0 @@
package dashboardtypesv2

import (
    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/types/dashboardtypes"
    v1 "github.com/perses/perses/pkg/model/api/v1"
    "github.com/perses/perses/pkg/model/api/v1/common"
    "github.com/perses/perses/pkg/model/api/v1/dashboard"
    "github.com/perses/perses/pkg/model/api/v1/variable"
    "github.com/swaggest/jsonschema-go"
)

// ══════════════════════════════════════════════
// Datasource
// ══════════════════════════════════════════════

type DatasourceSpec struct {
    Display *common.Display  `json:"display,omitempty"`
    Default bool             `json:"default"`
    Plugin  DatasourcePlugin `json:"plugin"`
}

// ══════════════════════════════════════════════
// Panel
// ══════════════════════════════════════════════

type Panel struct {
    Kind string    `json:"kind"`
    Spec PanelSpec `json:"spec"`
}

type PanelSpec struct {
    Display *v1.PanelDisplay `json:"display,omitempty"`
    Plugin  PanelPlugin      `json:"plugin"`
    Queries []Query          `json:"queries,omitempty"`
    Links   []v1.Link        `json:"links,omitempty"`
}

// ══════════════════════════════════════════════
// Query
// ══════════════════════════════════════════════

type Query struct {
    Kind string    `json:"kind"`
    Spec QuerySpec `json:"spec"`
}

type QuerySpec struct {
    Name   string      `json:"name,omitempty"`
    Plugin QueryPlugin `json:"plugin"`
}

// ══════════════════════════════════════════════
// Variable
// ══════════════════════════════════════════════

// Variable is the list/text sum type. Spec is set to *ListVariableSpec or
// *TextVariableSpec by UnmarshalJSON based on Kind. The schema is a
// discriminated oneOf (see JSONSchemaOneOf).
type Variable struct {
    Kind variable.Kind `json:"kind"`
    Spec any           `json:"spec"`
}

func (Variable) PrepareJSONSchema(s *jsonschema.Schema) error {
    return clearOneOfParentShape(s)
}

func (v *Variable) UnmarshalJSON(data []byte) error {
    kind, specJSON, err := extractKindAndSpec(data)
    if err != nil {
        return err
    }
    switch kind {
    case string(variable.KindList):
        spec, err := decodeSpec(specJSON, new(ListVariableSpec), kind)
        if err != nil {
            return err
        }
        v.Kind = variable.KindList
        v.Spec = spec
    case string(variable.KindText):
        spec, err := decodeSpec(specJSON, new(TextVariableSpec), kind)
        if err != nil {
            return err
        }
        v.Kind = variable.KindText
        v.Spec = spec
    default:
        return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "unknown variable kind %q", kind)
    }
    return nil
}

func (Variable) JSONSchemaOneOf() []any {
    return []any{
        VariableEnvelope[ListVariableSpec]{Kind: string(variable.KindList)},
        VariableEnvelope[TextVariableSpec]{Kind: string(variable.KindText)},
    }
}

type VariableEnvelope[S any] struct {
    Kind string `json:"kind" required:"true"`
    Spec S      `json:"spec" required:"true"`
}

func (v VariableEnvelope[S]) PrepareJSONSchema(s *jsonschema.Schema) error {
    return restrictKindToOneValue(s, v.Kind)
}

// ListVariableSpec mirrors dashboard.ListVariableSpec (variable.ListSpec
// fields + Name) but with a typed VariablePlugin replacing common.Plugin.
type ListVariableSpec struct {
    Display         *variable.Display      `json:"display,omitempty"`
    DefaultValue    *variable.DefaultValue `json:"defaultValue,omitempty"`
    AllowAllValue   bool                   `json:"allowAllValue"`
    AllowMultiple   bool                   `json:"allowMultiple"`
    CustomAllValue  string                 `json:"customAllValue,omitempty"`
    CapturingRegexp string                 `json:"capturingRegexp,omitempty"`
    Sort            *variable.Sort         `json:"sort,omitempty"`
    Plugin          VariablePlugin         `json:"plugin"`
    Name            string                 `json:"name"`
}

// TextVariableSpec mirrors dashboard.TextVariableSpec (variable.TextSpec +
// Name). No plugin.
type TextVariableSpec struct {
    Display  *variable.Display `json:"display,omitempty"`
    Value    string            `json:"value"`
    Constant bool              `json:"constant,omitempty"`
    Name     string            `json:"name"`
}
|
||||
|
||||
// ══════════════════════════════════════════════
|
||||
// Layout
|
||||
// ══════════════════════════════════════════════
|
||||
|
||||
// Layout is the dashboard layout sum type. Spec is populated by UnmarshalJSON
|
||||
// with the concrete layout spec struct (today only dashboard.GridLayoutSpec)
|
||||
// based on Kind. No plugin is involved, so we reuse the Perses spec types as
|
||||
// leaf imports.
|
||||
type Layout struct {
|
||||
Kind dashboard.LayoutKind `json:"kind"`
|
||||
Spec any `json:"spec"`
|
||||
}
|
||||
|
||||
// layoutSpecs is the layout sum type factory. Perses only defines
|
||||
// KindGridLayout today; adding a new kind upstream surfaces as an
|
||||
// "unknown layout kind" runtime error here until we add it.
|
||||
var layoutSpecs = map[dashboard.LayoutKind]func() any{
|
||||
dashboard.KindGridLayout: func() any { return new(dashboard.GridLayoutSpec) },
|
||||
}
|
||||
|
||||
func (Layout) PrepareJSONSchema(s *jsonschema.Schema) error {
|
||||
return clearOneOfParentShape(s)
|
||||
}
|
||||
|
||||
func (l *Layout) UnmarshalJSON(data []byte) error {
|
||||
kind, specJSON, err := extractKindAndSpec(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
factory, ok := layoutSpecs[dashboard.LayoutKind(kind)]
|
||||
if !ok {
|
||||
return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "unknown layout kind %q", kind)
|
||||
}
|
||||
spec, err := decodeSpec(specJSON, factory(), kind)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Kind = dashboard.LayoutKind(kind)
|
||||
l.Spec = spec
|
||||
return nil
|
||||
}
|
||||
|
||||
func (Layout) JSONSchemaOneOf() []any {
|
||||
return []any{
|
||||
LayoutEnvelope[dashboard.GridLayoutSpec]{Kind: string(dashboard.KindGridLayout)},
|
||||
}
|
||||
}
|
||||
|
||||
type LayoutEnvelope[S any] struct {
|
||||
Kind string `json:"kind" required:"true"`
|
||||
Spec S `json:"spec" required:"true"`
|
||||
}
|
||||
|
||||
func (v LayoutEnvelope[S]) PrepareJSONSchema(s *jsonschema.Schema) error {
|
||||
return restrictKindToOneValue(s, v.Kind)
|
||||
}
|
||||
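Both UnmarshalJSON implementations above lean on extractKindAndSpec, which this diff references but does not show. A plausible minimal shape, assuming it only splits the envelope and defers spec decoding (the real signature may differ):

```go
// Sketch of the helper the sum types above rely on: pull out "kind" and keep
// "spec" as raw bytes so the caller can dispatch on kind before decoding.
func extractKindAndSpec(data []byte) (string, json.RawMessage, error) {
	var envelope struct {
		Kind string          `json:"kind"`
		Spec json.RawMessage `json:"spec"`
	}
	if err := json.Unmarshal(data, &envelope); err != nil {
		return "", nil, err
	}
	return envelope.Kind, envelope.Spec, nil
}
```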
@@ -1,15 +1,13 @@
-package dashboardtypesv2
+package dashboardtypes

import (
	"encoding/json"
	"strconv"

	"github.com/SigNoz/signoz/pkg/errors"
-	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
	qb "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/swaggest/jsonschema-go"
)

// ══════════════════════════════════════════════
@@ -22,10 +20,11 @@ const (
	VariableKindDynamic VariablePluginKind = "signoz/DynamicVariable"
	VariableKindQuery   VariablePluginKind = "signoz/QueryVariable"
	VariableKindCustom  VariablePluginKind = "signoz/CustomVariable"
+	VariableKindTextbox VariablePluginKind = "signoz/TextboxVariable"
)

func (VariablePluginKind) Enum() []any {
-	return []any{VariableKindDynamic, VariableKindQuery, VariableKindCustom}
+	return []any{VariableKindDynamic, VariableKindQuery, VariableKindCustom, VariableKindTextbox}
}

type DynamicVariableSpec struct {
@@ -43,6 +42,8 @@ type CustomVariableSpec struct {
	CustomValue string `json:"customValue" validate:"required" required:"true"`
}

+type TextboxVariableSpec struct{}
+
// ══════════════════════════════════════════════
// SigNoz query plugin specs — aliased from querybuildertypesv5
// ══════════════════════════════════════════════
@@ -80,36 +81,12 @@ type BuilderQuerySpec struct {
func (b *BuilderQuerySpec) UnmarshalJSON(data []byte) error {
	spec, err := qb.UnmarshalBuilderQueryBySignal(data)
	if err != nil {
-		return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "invalid builder query spec")
+		return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "invalid builder query spec")
	}
	b.Spec = spec
	return nil
}
-
-// MarshalJSON delegates to the inner Spec so the on-wire shape matches what
-// UnmarshalJSON expects (a flat builder-query payload with `signal` at the top
-// level). Without this, Go's default would wrap it as {"Spec": {...}} and the
-// signal-dispatch on read would fail.
-func (b BuilderQuerySpec) MarshalJSON() ([]byte, error) {
-	return json.Marshal(b.Spec)
-}
-
-// PrepareJSONSchema drops the reflected struct shape so only the
-// JSONSchemaOneOf result binds.
-func (BuilderQuerySpec) PrepareJSONSchema(s *jsonschema.Schema) error {
-	return clearOneOfParentShape(s)
-}
-
-// JSONSchemaOneOf exposes the three signal-dispatched shapes a builder query
-// can take. Mirrors qb.UnmarshalBuilderQueryBySignal's runtime dispatch.
-func (BuilderQuerySpec) JSONSchemaOneOf() []any {
-	return []any{
-		qb.QueryBuilderQuery[qb.LogAggregation]{},
-		qb.QueryBuilderQuery[qb.MetricAggregation]{},
-		qb.QueryBuilderQuery[qb.TraceAggregation]{},
-	}
-}

// ══════════════════════════════════════════════
// SigNoz panel plugin specs
// ══════════════════════════════════════════════
@@ -295,7 +272,7 @@ func (t TimePreference) MarshalJSON() ([]byte, error) {
func (t *TimePreference) UnmarshalJSON(data []byte) error {
	var v string
	if err := json.Unmarshal(data, &v); err != nil {
-		return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "invalid timePreference: must be a string, one of `global_time`, `last_5_min`, `last_15_min`, `last_30_min`, `last_1_hr`, `last_6_hr`, `last_1_day`, `last_3_days`, `last_1_week`, or `last_1_month`")
+		return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "invalid timePreference: must be a string, one of `global_time`, `last_5_min`, `last_15_min`, `last_30_min`, `last_1_hr`, `last_6_hr`, `last_1_day`, `last_3_days`, `last_1_week`, or `last_1_month`")
	}
	if v == "" {
		*t = TimePreferenceGlobalTime
@@ -307,7 +284,7 @@ func (t *TimePreference) UnmarshalJSON(data []byte) error {
		*t = tp
		return nil
	default:
-		return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "invalid timePreference %q: must be `global_time`, `last_5_min`, `last_15_min`, `last_30_min`, `last_1_hr`, `last_6_hr`, `last_1_day`, `last_3_days`, `last_1_week`, or `last_1_month`", v)
+		return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "invalid timePreference %q: must be `global_time`, `last_5_min`, `last_15_min`, `last_30_min`, `last_1_hr`, `last_6_hr`, `last_1_day`, `last_3_days`, `last_1_week`, or `last_1_month`", v)
	}
}
@@ -336,7 +313,7 @@ func (l LegendPosition) MarshalJSON() ([]byte, error) {
func (l *LegendPosition) UnmarshalJSON(data []byte) error {
	var v string
	if err := json.Unmarshal(data, &v); err != nil {
-		return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "invalid legend position: must be a string, one of `bottom` or `right`")
+		return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "invalid legend position: must be a string, one of `bottom` or `right`")
	}
	if v == "" {
		*l = LegendPositionBottom
@@ -348,7 +325,7 @@ func (l *LegendPosition) UnmarshalJSON(data []byte) error {
		*l = lp
		return nil
	default:
-		return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "invalid legend position %q: must be `bottom` or `right`", v)
+		return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "invalid legend position %q: must be `bottom` or `right`", v)
	}
}
@@ -377,7 +354,7 @@ func (f ThresholdFormat) MarshalJSON() ([]byte, error) {
func (f *ThresholdFormat) UnmarshalJSON(data []byte) error {
	var v string
	if err := json.Unmarshal(data, &v); err != nil {
-		return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "invalid threshold format: must be a string, one of `text` or `background`")
+		return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "invalid threshold format: must be a string, one of `text` or `background`")
	}
	if v == "" {
		*f = ThresholdFormatText
@@ -389,7 +366,7 @@ func (f *ThresholdFormat) UnmarshalJSON(data []byte) error {
		*f = tf
		return nil
	default:
-		return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "invalid threshold format %q: must be `text` or `background`", v)
+		return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "invalid threshold format %q: must be `text` or `background`", v)
	}
}
@@ -429,7 +406,7 @@ func (o ComparisonOperator) MarshalJSON() ([]byte, error) {
func (o *ComparisonOperator) UnmarshalJSON(data []byte) error {
	var v string
	if err := json.Unmarshal(data, &v); err != nil {
-		return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "invalid comparison operator: must be a string, one of `>`, `<`, `>=`, `<=`, `=`, `above`, `below`, `above_or_equal`, `below_or_equal`, `equal`, or `not_equal`")
+		return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "invalid comparison operator: must be a string, one of `>`, `<`, `>=`, `<=`, `=`, `above`, `below`, `above_or_equal`, `below_or_equal`, `equal`, or `not_equal`")
	}
	if v == "" {
		*o = ComparisonOperatorGT
@@ -443,7 +420,7 @@ func (o *ComparisonOperator) UnmarshalJSON(data []byte) error {
		*o = co
		return nil
	default:
-		return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "invalid comparison operator %q: must be `>`, `<`, `>=`, `<=`, `=`, `above`, `below`, `above_or_equal`, `below_or_equal`, `equal`, or `not_equal`", v)
+		return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "invalid comparison operator %q: must be `>`, `<`, `>=`, `<=`, `=`, `above`, `below`, `above_or_equal`, `below_or_equal`, `equal`, or `not_equal`", v)
	}
}
@@ -474,7 +451,7 @@ func (li LineInterpolation) MarshalJSON() ([]byte, error) {
func (li *LineInterpolation) UnmarshalJSON(data []byte) error {
	var v string
	if err := json.Unmarshal(data, &v); err != nil {
-		return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "invalid line interpolation: must be a string, one of `linear`, `spline`, `step_after`, or `step_before`")
+		return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "invalid line interpolation: must be a string, one of `linear`, `spline`, `step_after`, or `step_before`")
	}
	if v == "" {
		*li = LineInterpolationSpline
@@ -486,7 +463,7 @@ func (li *LineInterpolation) UnmarshalJSON(data []byte) error {
		*li = val
		return nil
	default:
-		return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "invalid line interpolation %q: must be `linear`, `spline`, `step_after`, or `step_before`", v)
+		return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "invalid line interpolation %q: must be `linear`, `spline`, `step_after`, or `step_before`", v)
	}
}
@@ -515,7 +492,7 @@ func (ls LineStyle) MarshalJSON() ([]byte, error) {
func (ls *LineStyle) UnmarshalJSON(data []byte) error {
	var v string
	if err := json.Unmarshal(data, &v); err != nil {
-		return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "invalid line style: must be a string, one of `solid` or `dashed`")
+		return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "invalid line style: must be a string, one of `solid` or `dashed`")
	}
	if v == "" {
		*ls = LineStyleSolid
@@ -527,7 +504,7 @@ func (ls *LineStyle) UnmarshalJSON(data []byte) error {
		*ls = val
		return nil
	default:
-		return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "invalid line style %q: must be `solid` or `dashed`", v)
+		return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "invalid line style %q: must be `solid` or `dashed`", v)
	}
}
@@ -557,7 +534,7 @@ func (fm FillMode) MarshalJSON() ([]byte, error) {
func (fm *FillMode) UnmarshalJSON(data []byte) error {
	var v string
	if err := json.Unmarshal(data, &v); err != nil {
-		return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "invalid fill mode: must be a string, one of `solid`, `gradient`, or `none`")
+		return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "invalid fill mode: must be a string, one of `solid`, `gradient`, or `none`")
	}
	if v == "" {
		*fm = FillModeSolid
@@ -569,7 +546,7 @@ func (fm *FillMode) UnmarshalJSON(data []byte) error {
		*fm = val
		return nil
	default:
-		return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "invalid fill mode %q: must be `solid`, `gradient`, or `none`", v)
+		return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "invalid fill mode %q: must be `solid`, `gradient`, or `none`", v)
	}
}
@@ -616,12 +593,12 @@ func (p *PrecisionOption) UnmarshalJSON(data []byte) error {
		p.String = valuer.NewString(strconv.Itoa(n))
		return nil
	default:
-		return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "invalid precision option %d: must be `0`, `1`, `2`, `3`, `4`, or `full`", n)
+		return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "invalid precision option %d: must be `0`, `1`, `2`, `3`, `4`, or `full`", n)
	}
	}
	var v string
	if err := json.Unmarshal(data, &v); err != nil {
-		return errors.WrapInvalidInputf(err, dashboardtypes.ErrCodeDashboardInvalidInput, "invalid precision option: must be `0`, `1`, `2`, `3`, `4`, or `full`")
+		return errors.WrapInvalidInputf(err, ErrCodeDashboardInvalidInput, "invalid precision option: must be `0`, `1`, `2`, `3`, `4`, or `full`")
	}
	if v == "" {
		*p = PrecisionOption2
@@ -633,6 +610,6 @@ func (p *PrecisionOption) UnmarshalJSON(data []byte) error {
		*p = val
		return nil
	default:
-		return errors.NewInvalidInputf(dashboardtypes.ErrCodeDashboardInvalidInput, "invalid precision option %q: must be `0`, `1`, `2`, `3`, `4`, or `full`", v)
+		return errors.NewInvalidInputf(ErrCodeDashboardInvalidInput, "invalid precision option %q: must be `0`, `1`, `2`, `3`, `4`, or `full`", v)
	}
}
@@ -32,14 +32,4 @@ type Store interface {
	DeletePublic(context.Context, string) error

	RunInTx(context.Context, func(context.Context) error) error
-
-	// ════════════════════════════════════════════════════════════════════════
-	// v2 dashboard methods
-	// ════════════════════════════════════════════════════════════════════════
-	GetV2(context.Context, valuer.UUID, valuer.UUID) (*StorableDashboard, *StorablePublicDashboard, error)
-
-	// UpdateV2 updates the dashboard's data, updated_at and updated_by columns
-	// only, scoped by org and excluding soft-deleted rows. Uses the caller's
-	// transaction context so it can be made atomic with tag relation changes.
-	UpdateV2(ctx context.Context, orgID valuer.UUID, id valuer.UUID, updatedBy string, data StorableDashboardData) error
}
@@ -76,7 +76,11 @@
				"display": {
					"name": "textboxvar"
				},
-				"value": "defaultvaluegoeshere"
+				"value": "defaultvaluegoeshere",
+				"plugin": {
+					"kind": "signoz/TextboxVariable",
+					"spec": {}
+				}
			}
		}
	],
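The rationale in the removed MarshalJSON comment above — Go's default encoding would wrap the payload as {"Spec": {...}} — is easy to verify in isolation. A standalone sketch using only the standard library and hypothetical types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type inner struct {
	Signal string `json:"signal"`
}

// wrapped shows Go's default behavior: the field name leaks into the JSON.
type wrapped struct {
	Spec inner
}

// delegating mirrors the BuilderQuerySpec.MarshalJSON pattern: the wire
// shape stays flat, so a signal-dispatching reader can decode it again.
type delegating struct {
	Spec inner
}

func (d delegating) MarshalJSON() ([]byte, error) { return json.Marshal(d.Spec) }

func main() {
	w, _ := json.Marshal(wrapped{Spec: inner{Signal: "logs"}})
	d, _ := json.Marshal(delegating{Spec: inner{Signal: "logs"}})
	fmt.Println(string(w)) // {"Spec":{"signal":"logs"}}
	fmt.Println(string(d)) // {"signal":"logs"}
}
```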
103  pkg/types/inframonitoringtypes/namespaces.go  Normal file
@@ -0,0 +1,103 @@
package inframonitoringtypes

import (
	"encoding/json"
	"slices"

	"github.com/SigNoz/signoz/pkg/errors"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

type Namespaces struct {
	Type                   ResponseType           `json:"type" required:"true"`
	Records                []NamespaceRecord      `json:"records" required:"true"`
	Total                  int                    `json:"total" required:"true"`
	RequiredMetricsCheck   RequiredMetricsCheck   `json:"requiredMetricsCheck" required:"true"`
	EndTimeBeforeRetention bool                   `json:"endTimeBeforeRetention" required:"true"`
	Warning                *qbtypes.QueryWarnData `json:"warning,omitempty"`
}

type NamespaceRecord struct {
	NamespaceName     string                 `json:"namespaceName" required:"true"`
	NamespaceCPU      float64                `json:"namespaceCPU" required:"true"`
	NamespaceMemory   float64                `json:"namespaceMemory" required:"true"`
	PendingPodCount   int                    `json:"pendingPodCount" required:"true"`
	RunningPodCount   int                    `json:"runningPodCount" required:"true"`
	SucceededPodCount int                    `json:"succeededPodCount" required:"true"`
	FailedPodCount    int                    `json:"failedPodCount" required:"true"`
	UnknownPodCount   int                    `json:"unknownPodCount" required:"true"`
	Meta              map[string]interface{} `json:"meta" required:"true"`
}

// PostableNamespaces is the request body for the v2 namespaces list API.
type PostableNamespaces struct {
	Start   int64                `json:"start" required:"true"`
	End     int64                `json:"end" required:"true"`
	Filter  *qbtypes.Filter      `json:"filter"`
	GroupBy []qbtypes.GroupByKey `json:"groupBy"`
	OrderBy *qbtypes.OrderBy     `json:"orderBy"`
	Offset  int                  `json:"offset"`
	Limit   int                  `json:"limit" required:"true"`
}

// Validate ensures PostableNamespaces contains acceptable values.
func (req *PostableNamespaces) Validate() error {
	if req == nil {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
	}

	if req.Start <= 0 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid start time %d: start must be greater than 0", req.Start)
	}

	if req.End <= 0 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid end time %d: end must be greater than 0", req.End)
	}

	if req.Start >= req.End {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid time range: start (%d) must be less than end (%d)", req.Start, req.End)
	}

	if req.Limit < 1 || req.Limit > 5000 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be between 1 and 5000")
	}

	if req.Offset < 0 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "offset cannot be negative")
	}

	if req.OrderBy != nil {
		if !slices.Contains(NamespacesValidOrderByKeys, req.OrderBy.Key.Name) {
			return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by key: %s", req.OrderBy.Key.Name)
		}
		if req.OrderBy.Direction != qbtypes.OrderDirectionAsc && req.OrderBy.Direction != qbtypes.OrderDirectionDesc {
			return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by direction: %s", req.OrderBy.Direction)
		}
	}

	return nil
}

// UnmarshalJSON validates input immediately after decoding.
func (req *PostableNamespaces) UnmarshalJSON(data []byte) error {
	type raw PostableNamespaces
	var decoded raw
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*req = PostableNamespaces(decoded)
	return req.Validate()
}
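PostableNamespaces wires Validate into UnmarshalJSON, so any handler that decodes the body gets validation for free. A usage sketch (import path assumed from the file above):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
)

func main() {
	var req inframonitoringtypes.PostableNamespaces
	// start >= end, so decoding fails with the Validate error immediately,
	// before the request reaches any query logic.
	body := []byte(`{"start": 2000, "end": 1000, "limit": 100}`)
	if err := json.Unmarshal(body, &req); err != nil {
		fmt.Println(err) // invalid time range: start (2000) must be less than end (1000)
	}
}
```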
11  pkg/types/inframonitoringtypes/namespaces_constants.go  Normal file
@@ -0,0 +1,11 @@
package inframonitoringtypes

const (
	NamespacesOrderByCPU    = "cpu"
	NamespacesOrderByMemory = "memory"
)

var NamespacesValidOrderByKeys = []string{
	NamespacesOrderByCPU,
	NamespacesOrderByMemory,
}
237  pkg/types/inframonitoringtypes/namespaces_test.go  Normal file
@@ -0,0 +1,237 @@
package inframonitoringtypes

import (
	"testing"

	"github.com/SigNoz/signoz/pkg/errors"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/stretchr/testify/require"
)

func TestPostableNamespaces_Validate(t *testing.T) {
	tests := []struct {
		name    string
		req     *PostableNamespaces
		wantErr bool
	}{
		{name: "valid request", req: &PostableNamespaces{Start: 1000, End: 2000, Limit: 100, Offset: 0}, wantErr: false},
		{name: "nil request", req: nil, wantErr: true},
		{name: "start time zero", req: &PostableNamespaces{Start: 0, End: 2000, Limit: 100, Offset: 0}, wantErr: true},
		{name: "start time negative", req: &PostableNamespaces{Start: -1000, End: 2000, Limit: 100, Offset: 0}, wantErr: true},
		{name: "end time zero", req: &PostableNamespaces{Start: 1000, End: 0, Limit: 100, Offset: 0}, wantErr: true},
		{name: "start time greater than end time", req: &PostableNamespaces{Start: 2000, End: 1000, Limit: 100, Offset: 0}, wantErr: true},
		{name: "start time equal to end time", req: &PostableNamespaces{Start: 1000, End: 1000, Limit: 100, Offset: 0}, wantErr: true},
		{name: "limit zero", req: &PostableNamespaces{Start: 1000, End: 2000, Limit: 0, Offset: 0}, wantErr: true},
		{name: "limit negative", req: &PostableNamespaces{Start: 1000, End: 2000, Limit: -10, Offset: 0}, wantErr: true},
		{name: "limit exceeds max", req: &PostableNamespaces{Start: 1000, End: 2000, Limit: 5001, Offset: 0}, wantErr: true},
		{name: "offset negative", req: &PostableNamespaces{Start: 1000, End: 2000, Limit: 100, Offset: -5}, wantErr: true},
		{name: "orderBy nil is valid", req: &PostableNamespaces{Start: 1000, End: 2000, Limit: 100, Offset: 0}, wantErr: false},
		{
			name: "orderBy with valid key cpu and direction asc",
			req: &PostableNamespaces{
				Start: 1000, End: 2000, Limit: 100, Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key:       qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: NamespacesOrderByCPU}},
					Direction: qbtypes.OrderDirectionAsc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with valid key memory and direction desc",
			req: &PostableNamespaces{
				Start: 1000, End: 2000, Limit: 100, Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key:       qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: NamespacesOrderByMemory}},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with pod_phase key is rejected",
			req: &PostableNamespaces{
				Start: 1000, End: 2000, Limit: 100, Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key:       qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "pod_phase"}},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: true,
		},
		{
			name: "orderBy with invalid key",
			req: &PostableNamespaces{
				Start: 1000, End: 2000, Limit: 100, Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key:       qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "unknown"}},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: true,
		},
		{
			name: "orderBy with valid key but invalid direction",
			req: &PostableNamespaces{
				Start: 1000, End: 2000, Limit: 100, Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key:       qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: NamespacesOrderByMemory}},
					Direction: qbtypes.OrderDirection{String: valuer.NewString("invalid")},
				},
			},
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.req.Validate()
			if tt.wantErr {
				require.Error(t, err)
				require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
			} else {
				require.NoError(t, err)
			}
		})
	}
}
103  pkg/types/inframonitoringtypes/nodes.go  Normal file
@@ -0,0 +1,103 @@
package inframonitoringtypes

import (
	"encoding/json"
	"slices"

	"github.com/SigNoz/signoz/pkg/errors"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

type Nodes struct {
	Type                   ResponseType           `json:"type" required:"true"`
	Records                []NodeRecord           `json:"records" required:"true"`
	Total                  int                    `json:"total" required:"true"`
	RequiredMetricsCheck   RequiredMetricsCheck   `json:"requiredMetricsCheck" required:"true"`
	EndTimeBeforeRetention bool                   `json:"endTimeBeforeRetention" required:"true"`
	Warning                *qbtypes.QueryWarnData `json:"warning,omitempty"`
}

type NodeRecord struct {
	NodeName              string                 `json:"nodeName" required:"true"`
	Condition             NodeCondition          `json:"condition" required:"true"`
	ReadyNodesCount       int                    `json:"readyNodesCount" required:"true"`
	NotReadyNodesCount    int                    `json:"notReadyNodesCount" required:"true"`
	NodeCPU               float64                `json:"nodeCPU" required:"true"`
	NodeCPUAllocatable    float64                `json:"nodeCPUAllocatable" required:"true"`
	NodeMemory            float64                `json:"nodeMemory" required:"true"`
	NodeMemoryAllocatable float64                `json:"nodeMemoryAllocatable" required:"true"`
	Meta                  map[string]interface{} `json:"meta" required:"true"`
}

// PostableNodes is the request body for the v2 nodes list API.
type PostableNodes struct {
	Start   int64                `json:"start" required:"true"`
	End     int64                `json:"end" required:"true"`
	Filter  *qbtypes.Filter      `json:"filter"`
	GroupBy []qbtypes.GroupByKey `json:"groupBy"`
	OrderBy *qbtypes.OrderBy     `json:"orderBy"`
	Offset  int                  `json:"offset"`
	Limit   int                  `json:"limit" required:"true"`
}

// Validate ensures PostableNodes contains acceptable values.
func (req *PostableNodes) Validate() error {
	if req == nil {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
	}

	if req.Start <= 0 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid start time %d: start must be greater than 0", req.Start)
	}

	if req.End <= 0 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid end time %d: end must be greater than 0", req.End)
	}

	if req.Start >= req.End {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid time range: start (%d) must be less than end (%d)", req.Start, req.End)
	}

	if req.Limit < 1 || req.Limit > 5000 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be between 1 and 5000")
	}

	if req.Offset < 0 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "offset cannot be negative")
	}

	if req.OrderBy != nil {
		if !slices.Contains(NodesValidOrderByKeys, req.OrderBy.Key.Name) {
			return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by key: %s", req.OrderBy.Key.Name)
		}
		if req.OrderBy.Direction != qbtypes.OrderDirectionAsc && req.OrderBy.Direction != qbtypes.OrderDirectionDesc {
			return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by direction: %s", req.OrderBy.Direction)
		}
	}

	return nil
}

// UnmarshalJSON validates input immediately after decoding.
func (req *PostableNodes) UnmarshalJSON(data []byte) error {
	type raw PostableNodes
	var decoded raw
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*req = PostableNodes(decoded)
	return req.Validate()
}
42  pkg/types/inframonitoringtypes/nodes_constants.go  Normal file
@@ -0,0 +1,42 @@
package inframonitoringtypes

import "github.com/SigNoz/signoz/pkg/valuer"

type NodeCondition struct {
	valuer.String
}

var (
	NodeConditionReady    = NodeCondition{valuer.NewString("ready")}
	NodeConditionNotReady = NodeCondition{valuer.NewString("not_ready")}
	NodeConditionNone     = NodeCondition{valuer.NewString("")}
)

func (NodeCondition) Enum() []any {
	return []any{
		NodeConditionReady,
		NodeConditionNotReady,
		NodeConditionNone,
	}
}

// Numeric values emitted by the k8s.node.condition_ready metric
// (source: OTel kubeletstats receiver).
const (
	NodeConditionNumReady    = 1
	NodeConditionNumNotReady = 0
)

const (
	NodesOrderByCPU               = "cpu"
	NodesOrderByCPUAllocatable    = "cpu_allocatable"
	NodesOrderByMemory            = "memory"
	NodesOrderByMemoryAllocatable = "memory_allocatable"
)

var NodesValidOrderByKeys = []string{
	NodesOrderByCPU,
	NodesOrderByCPUAllocatable,
	NodesOrderByMemory,
	NodesOrderByMemoryAllocatable,
}
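The numeric constants above mirror the values the k8s.node.condition_ready metric reports. A small illustrative mapping helper — hypothetical glue that is not part of this change set, written as if it lived inside package inframonitoringtypes — makes the intended translation explicit:

```go
// conditionFromMetricValue is a hypothetical helper, not in this diff: it
// translates a k8s.node.condition_ready datapoint into the NodeCondition enum.
func conditionFromMetricValue(v int) NodeCondition {
	switch v {
	case NodeConditionNumReady:
		return NodeConditionReady
	case NodeConditionNumNotReady:
		return NodeConditionNotReady
	default:
		// Unknown or missing datapoints fall back to the empty condition.
		return NodeConditionNone
	}
}
```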
255  pkg/types/inframonitoringtypes/nodes_test.go  Normal file
@@ -0,0 +1,255 @@
package inframonitoringtypes

import (
	"testing"

	"github.com/SigNoz/signoz/pkg/errors"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/stretchr/testify/require"
)

func TestPostableNodes_Validate(t *testing.T) {
	tests := []struct {
		name    string
		req     *PostableNodes
		wantErr bool
	}{
		{name: "valid request", req: &PostableNodes{Start: 1000, End: 2000, Limit: 100, Offset: 0}, wantErr: false},
		{name: "nil request", req: nil, wantErr: true},
		{name: "start time zero", req: &PostableNodes{Start: 0, End: 2000, Limit: 100, Offset: 0}, wantErr: true},
		{name: "start time negative", req: &PostableNodes{Start: -1000, End: 2000, Limit: 100, Offset: 0}, wantErr: true},
		{name: "end time zero", req: &PostableNodes{Start: 1000, End: 0, Limit: 100, Offset: 0}, wantErr: true},
		{name: "start time greater than end time", req: &PostableNodes{Start: 2000, End: 1000, Limit: 100, Offset: 0}, wantErr: true},
		{name: "start time equal to end time", req: &PostableNodes{Start: 1000, End: 1000, Limit: 100, Offset: 0}, wantErr: true},
		{name: "limit zero", req: &PostableNodes{Start: 1000, End: 2000, Limit: 0, Offset: 0}, wantErr: true},
		{name: "limit negative", req: &PostableNodes{Start: 1000, End: 2000, Limit: -10, Offset: 0}, wantErr: true},
		{name: "limit exceeds max", req: &PostableNodes{Start: 1000, End: 2000, Limit: 5001, Offset: 0}, wantErr: true},
		{name: "offset negative", req: &PostableNodes{Start: 1000, End: 2000, Limit: 100, Offset: -5}, wantErr: true},
		{name: "orderBy nil is valid", req: &PostableNodes{Start: 1000, End: 2000, Limit: 100, Offset: 0}, wantErr: false},
		{
			name: "orderBy with valid key cpu and direction asc",
			req: &PostableNodes{
				Start: 1000, End: 2000, Limit: 100, Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key:       qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: NodesOrderByCPU}},
					Direction: qbtypes.OrderDirectionAsc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with valid key cpu_allocatable and direction desc",
			req: &PostableNodes{
				Start: 1000, End: 2000, Limit: 100, Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key:       qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: NodesOrderByCPUAllocatable}},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with valid key memory_allocatable and direction asc",
			req: &PostableNodes{
				Start: 1000, End: 2000, Limit: 100, Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key:       qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: NodesOrderByMemoryAllocatable}},
					Direction: qbtypes.OrderDirectionAsc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with condition key is rejected",
			req: &PostableNodes{
				Start: 1000, End: 2000, Limit: 100, Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key:       qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "condition"}},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: true,
		},
		{
			name: "orderBy with invalid key",
			req: &PostableNodes{
				Start: 1000, End: 2000, Limit: 100, Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key:       qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "unknown"}},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: true,
		},
		{
			name: "orderBy with valid key but invalid direction",
			req: &PostableNodes{
				Start: 1000, End: 2000, Limit: 100, Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key:       qbtypes.OrderByKey{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: NodesOrderByMemory}},
					Direction: qbtypes.OrderDirection{String: valuer.NewString("invalid")},
				},
			},
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.req.Validate()
			if tt.wantErr {
				require.Error(t, err)
				require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
			} else {
				require.NoError(t, err)
			}
		})
	}
}
@@ -1,24 +0,0 @@
package tagtypes

import (
	"context"

	"github.com/SigNoz/signoz/pkg/valuer"
)

type Store interface {
	List(ctx context.Context, orgID valuer.UUID) ([]*Tag, error)

	ListByEntity(ctx context.Context, entityID valuer.UUID) ([]*Tag, error)

	// Create upserts the given tags and returns them with authoritative IDs.
	// On conflict on (org_id, internal_name) — which happens only when a
	// concurrent insert raced ours — the returned entry carries the existing
	// row's ID rather than the pre-generated one in the input.
	Create(ctx context.Context, tags []*Tag) ([]*Tag, error)

	// CreateRelations inserts tag-entity relations. Conflicts on the composite primary key are ignored.
	CreateRelations(ctx context.Context, relations []*TagRelation) error

	DeleteRelationsExcept(ctx context.Context, entityID valuer.UUID, keepTagIDs []valuer.UUID) error
}
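The Create contract above (upsert, return authoritative IDs on conflict) maps naturally onto a bun insert with a conflict clause. A minimal sketch of how a bun-backed implementation could satisfy it — the store type, column choices, and no-op update column are assumptions, not code from this diff:

```go
// Create is a sketch against the Store contract above, using bun's
// ON CONFLICT + RETURNING so racing inserts come back with the winning
// row's id instead of the pre-generated one.
func (s *store) Create(ctx context.Context, tags []*Tag) ([]*Tag, error) {
	if len(tags) == 0 {
		return tags, nil
	}
	// DO UPDATE on a harmless column forces RETURNING to yield the
	// existing row (a plain DO NOTHING would return no rows on conflict).
	_, err := s.db.NewInsert().
		Model(&tags).
		On("CONFLICT (org_id, internal_name) DO UPDATE").
		Set("updated_at = EXCLUDED.updated_at").
		Returning("*").
		Exec(ctx)
	return tags, err
}
```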
@@ -1,197 +0,0 @@
package tagtypes

import (
	"context"
	"strings"
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/uptrace/bun"
)

const (
	// separator users type in the display name (e.g. "team/blr").
	HierarchySeparator = "/"

	// separator used in internal_name. Different from HierarchySeparator
	// because "/" is reserved by the access control layer.
	InternalSeparator = "::"
)

var (
	ErrCodeTagInvalidName = errors.MustNewCode("tag_invalid_name")
	ErrCodeTagNotFound    = errors.MustNewCode("tag_not_found")
)

type Tag struct {
	bun.BaseModel `bun:"table:tag,alias:tag"`

	types.Identifiable
	types.TimeAuditable
	types.UserAuditable
	Name         string      `json:"name" required:"true" bun:"name,type:text,notnull"`
	InternalName string      `json:"internalName" required:"true" bun:"internal_name,type:text,notnull,unique:org_id_internal_name"`
	OrgID        valuer.UUID `json:"orgId" required:"true" bun:"org_id,type:text,notnull,unique:org_id_internal_name"`
}

type PostableTag struct {
	Name string `json:"name" required:"true"`
}

type GettableTag struct {
	Name string `json:"name" required:"true"`
}

func NewGettableTagFromTag(tag *Tag) *GettableTag {
	return &GettableTag{Name: tag.Name}
}

func NewGettableTagsFromTags(tags []*Tag) []*GettableTag {
	out := make([]*GettableTag, len(tags))
	for i, t := range tags {
		out[i] = NewGettableTagFromTag(t)
	}
	return out
}

func NewTag(orgID valuer.UUID, name string, internalName string, createdBy string) *Tag {
	now := time.Now()
	return &Tag{
		Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
		TimeAuditable: types.TimeAuditable{
			CreatedAt: now,
			UpdatedAt: now,
		},
		UserAuditable: types.UserAuditable{
			CreatedBy: createdBy,
			UpdatedBy: createdBy,
		},
		Name:         name,
		InternalName: internalName,
		OrgID:        orgID,
	}
}

// Resolve canonicalizes a batch of user-supplied tag names against the existing
// tags for an org. Existing parent tags' casing is reused so that
// "teams/blr/platform" inherits the "BLR" casing from a pre-existing
// "teams/BLR". Inputs are deduped by internal name. Returns:
//   - toCreate: new Tag rows the caller should insert (with pre-generated IDs)
//   - matched: existing rows that the caller's input already pointed to. They
//     already carry authoritative IDs from the store.
func Resolve(ctx context.Context, store Store, orgID valuer.UUID, postable []PostableTag, createdBy string) ([]*Tag, []*Tag, error) {
	if len(postable) == 0 {
		return nil, nil, nil
	}

	existing, err := store.List(ctx, orgID)
	if err != nil {
		return nil, nil, err
	}

	internalNameToExistingTag := make(map[string]*Tag, len(existing))
	for _, t := range existing {
		internalNameToExistingTag[t.InternalName] = t
	}

	seen := make(map[string]struct{}, len(postable))
	toCreate := make([]*Tag, 0)
	matched := make([]*Tag, 0)

	for _, p := range postable {
		cleanedName, err := cleanupName(p.Name)
		if err != nil {
			return nil, nil, err
		}
		matchedName, matchedInternalName := matchCasingWithExistingTags(cleanedName, existing)
		if _, dup := seen[matchedInternalName]; dup {
			continue
		}
		seen[matchedInternalName] = struct{}{}

		if existingTag, ok := internalNameToExistingTag[matchedInternalName]; ok {
			matched = append(matched, existingTag)
			continue
		}
		toCreate = append(toCreate, NewTag(orgID, matchedName, matchedInternalName, createdBy))
	}

	return toCreate, matched, nil
}

func cleanupName(name string) (string, error) {
	trimmed := strings.TrimSpace(name)
	raw := strings.Split(trimmed, HierarchySeparator)
	segments := make([]string, 0, len(raw))
	for _, seg := range raw {
		seg = strings.TrimSpace(seg)
		if seg == "" {
			continue
		}
		segments = append(segments, seg)
	}
	if len(segments) == 0 {
		return "", errors.Newf(errors.TypeInvalidInput, ErrCodeTagInvalidName, "tag name cannot be empty")
	}

	for _, seg := range segments {
		if strings.Contains(seg, InternalSeparator) {
			return "", errors.Newf(errors.TypeInvalidInput, ErrCodeTagInvalidName, "tag name segment %q cannot contain %q", seg, InternalSeparator)
		}
	}

	return strings.Join(segments, HierarchySeparator), nil
}

func buildInternalName(cleanedName string) string {
	return strings.ToLower(strings.ReplaceAll(cleanedName, HierarchySeparator, InternalSeparator))
}

// matchCasingWithExistingTags returns the display name and internal name to use for
// a user-supplied tag, given the existing tags in the org. If an existing tag
// has the same internal name, its display name (casing) is reused. Otherwise
// the existing tag with the longest segment-wise (case-insensitive) common
// prefix lends its casing to the matching segments — the remaining input
// segments keep the user-supplied casing. With no overlap, the input is
// returned as-is.
func matchCasingWithExistingTags(inputCleaned string, existing []*Tag) (canonicalName string, canonicalInternalName string) {
	inputSegments := strings.Split(inputCleaned, HierarchySeparator)
	inputInternal := buildInternalName(inputCleaned)

	var bestMatch *Tag
	bestMatchLen := 0
	for _, tag := range existing {
		if tag.InternalName == inputInternal {
			return tag.Name, tag.InternalName
		}
		existingSegments := strings.Split(tag.Name, HierarchySeparator)
		matchLen := commonSegmentPrefix(inputSegments, existingSegments)
		if matchLen > bestMatchLen {
			bestMatch = tag
			bestMatchLen = matchLen
		}
	}

	if bestMatch == nil {
		return inputCleaned, inputInternal
	}

	existingSegments := strings.Split(bestMatch.Name, HierarchySeparator)
	canonicalSegments := append(existingSegments[:bestMatchLen:bestMatchLen], inputSegments[bestMatchLen:]...)
	canonical := strings.Join(canonicalSegments, HierarchySeparator)
	return canonical, buildInternalName(canonical)
}

// commonSegmentPrefix returns the number of leading segments on which the two
// slices agree, compared case-insensitively.
// E.g. ["a", "bcd", "efg", "h"] and ["a", "bcd", "ef"] returns 2.
func commonSegmentPrefix(a, b []string) int {
	n := min(len(a), len(b))
	for i := range n {
		if !strings.EqualFold(a[i], b[i]) {
			return i
		}
	}
	return n
}
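To make the casing-inheritance contract concrete, a worked example against the unexported helpers above — illustrative only, written as if it sat inside package tagtypes (e.g. in a test):

```go
// With an existing tag "team/BLR", the input "team/blr/platform" shares a
// 2-segment case-insensitive prefix with ["team", "BLR"], so the parent's
// casing is inherited and only the new trailing segment keeps input casing.
func ExampleCanonicalization() {
	existing := []*Tag{{Name: "team/BLR", InternalName: "team::blr"}}
	name, internal := matchCasingWithExistingTags("team/blr/platform", existing)
	fmt.Println(name)     // team/BLR/platform
	fmt.Println(internal) // team::blr::platform
}
```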
@@ -1,38 +0,0 @@
package tagtypes

import (
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/uptrace/bun"
)

type EntityType struct{ valuer.String }

func MustNewEntityType(name string) EntityType {
	return EntityType{valuer.NewString(name)}
}

type TagRelation struct {
	bun.BaseModel `bun:"table:tag_relations,alias:tag_relations"`

	EntityType EntityType  `json:"entityType" required:"true" bun:"entity_type,type:text,notnull"`
	EntityID   valuer.UUID `json:"entityId" required:"true" bun:"entity_id,pk,type:text,notnull"`
	TagID      valuer.UUID `json:"tagId" required:"true" bun:"tag_id,pk,type:text,notnull"`
	OrgID      valuer.UUID `json:"orgId" required:"true" bun:"org_id,type:text,notnull"`
}

func NewTagRelation(orgID valuer.UUID, entityType EntityType, entityID valuer.UUID, tagID valuer.UUID) *TagRelation {
	return &TagRelation{
		EntityType: entityType,
		EntityID:   entityID,
		TagID:      tagID,
		OrgID:      orgID,
	}
}

func NewTagRelations(orgID valuer.UUID, entityType EntityType, entityID valuer.UUID, tagIDs []valuer.UUID) []*TagRelation {
	relations := make([]*TagRelation, 0, len(tagIDs))
	for _, tagID := range tagIDs {
		relations = append(relations, NewTagRelation(orgID, entityType, entityID, tagID))
	}
	return relations
}
@@ -1,236 +0,0 @@
|
||||
package tagtypes
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestCleanupName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
want string
|
||||
wantError bool
|
||||
}{
|
||||
{name: "single segment", input: "prod", want: "prod"},
|
||||
{name: "two segments", input: "team/blr", want: "team/blr"},
|
||||
{name: "three segments", input: "team/BLR/platform", want: "team/BLR/platform"},
|
||||
{name: "leading whitespace", input: " prod", want: "prod"},
|
||||
{name: "trailing whitespace", input: "prod ", want: "prod"},
|
||||
{name: "leading separator", input: "/prod", want: "prod"},
|
||||
{name: "trailing separator", input: "prod/", want: "prod"},
|
||||
{name: "consecutive separators collapsed", input: "team//blr", want: "team/blr"},
|
||||
{name: "many separators collapsed", input: "team///blr////platform", want: "team/blr/platform"},
|
||||
{name: "whitespace within segments", input: "team/ blr ", want: "team/blr"},
|
||||
{name: "empty rejected", input: "", wantError: true},
|
||||
{name: "only whitespace rejected", input: " ", wantError: true},
|
||||
{name: "only separators rejected", input: "///", wantError: true},
|
||||
{name: "internal separator rejected", input: "team/foo::bar", wantError: true},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got, err := cleanupName(tc.input)
|
||||
if tc.wantError {
|
||||
require.Error(t, err)
|
||||
return
|
||||
}
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, tc.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildInternalName(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{input: "prod", want: "prod"},
|
||||
{input: "Prod", want: "prod"},
|
||||
{input: "team/BLR/platform", want: "team::blr::platform"},
|
||||
{input: "TEAM/BLR", want: "team::blr"},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.input, func(t *testing.T) {
|
||||
assert.Equal(t, tc.want, buildInternalName(tc.input))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchCasingWithExistingTags(t *testing.T) {
|
||||
existing := []*Tag{
|
||||
{Name: "team/BLR", InternalName: "team::blr"},
|
||||
{Name: "team/BLR/Pulse", InternalName: "team::blr::pulse"},
|
||||
{Name: "Database", InternalName: "database"},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
wantName string
|
||||
wantInternalName string
|
||||
}{
|
||||
{
|
||||
name: "exact match reuses casing",
|
||||
input: "team/blr",
|
||||
wantName: "team/BLR",
|
||||
wantInternalName: "team::blr",
|
||||
},
|
||||
{
|
||||
name: "exact match reuses casing for deeper tag",
|
||||
input: "TEAM/blr/pulse",
|
||||
wantName: "team/BLR/Pulse",
|
||||
wantInternalName: "team::blr::pulse",
|
||||
},
|
||||
{
|
||||
name: "prefix match reuses prefix casing and keeps remainder",
|
||||
input: "team/blr/platform",
|
||||
wantName: "team/BLR/platform",
|
||||
wantInternalName: "team::blr::platform",
|
||||
},
|
||||
{
|
||||
name: "longest prefix wins",
|
||||
input: "team/blr/pulse/sub",
|
||||
wantName: "team/BLR/Pulse/sub",
|
||||
wantInternalName: "team::blr::pulse::sub",
|
||||
},
|
||||
{
|
||||
name: "no match returns input as-is",
|
||||
input: "Brand-New/Tag",
|
||||
wantName: "Brand-New/Tag",
|
||||
wantInternalName: "brand-new::tag",
|
||||
},
|
||||
{
|
||||
name: "single segment exact match",
|
||||
input: "DATABASE",
|
||||
wantName: "Database",
|
||||
wantInternalName: "database",
|
||||
},
|
||||
{
|
||||
name: "input that shares text but not segment boundary is not a prefix match",
|
||||
input: "teams/blr",
|
||||
wantName: "teams/blr",
|
||||
wantInternalName: "teams::blr",
|
||||
},
|
||||
{
|
||||
name: "prefix of an existing tag",
|
||||
input: "TEAM",
|
||||
wantName: "team",
|
||||
wantInternalName: "team",
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cleaned, err := cleanupName(tc.input)
|
||||
require.NoError(t, err)
|
||||
gotName, gotInternal := matchCasingWithExistingTags(cleaned, existing)
|
||||
assert.Equal(t, tc.wantName, gotName)
|
||||
assert.Equal(t, tc.wantInternalName, gotInternal)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchCasingWithExistingTags_NoTagsExist(t *testing.T) {
|
||||
cleaned, err := cleanupName("Foo/Bar")
|
||||
require.NoError(t, err)
|
||||
name, internal := matchCasingWithExistingTags(cleaned, nil)
|
||||
assert.Equal(t, "Foo/Bar", name)
|
||||
assert.Equal(t, "foo::bar", internal)
|
||||
}
type fakeStore struct {
	tags          []*Tag
	listCallCount int
}

func (f *fakeStore) List(_ context.Context, _ valuer.UUID) ([]*Tag, error) {
	f.listCallCount++
	out := make([]*Tag, len(f.tags))
	copy(out, f.tags)
	return out, nil
}

func (f *fakeStore) Create(_ context.Context, tags []*Tag) ([]*Tag, error) {
	return tags, nil
}

func (f *fakeStore) CreateRelations(_ context.Context, _ []*TagRelation) error {
	return nil
}

func (f *fakeStore) ListByEntity(_ context.Context, _ valuer.UUID) ([]*Tag, error) {
	return nil, nil
}

func (f *fakeStore) DeleteRelationsExcept(_ context.Context, _ valuer.UUID, _ []valuer.UUID) error {
	return nil
}

func TestResolve(t *testing.T) {
	t.Run("empty input does not hit store", func(t *testing.T) {
		store := &fakeStore{}
		toCreate, matched, err := Resolve(context.Background(), store, valuer.GenerateUUID(), nil, "u@signoz.io")
		require.NoError(t, err)
		assert.Empty(t, toCreate)
		assert.Empty(t, matched)
		assert.Zero(t, store.listCallCount, "should not hit store when input is empty")
	})

	t.Run("creates missing tags and reuses existing", func(t *testing.T) {
		orgID := valuer.GenerateUUID()
		dbTag := NewTag(orgID, "team/BLR", "team::blr", "seed")
		dbTag2 := NewTag(orgID, "Database", "database", "seed")
		store := &fakeStore{tags: []*Tag{dbTag, dbTag2}}

		toCreate, matched, err := Resolve(context.Background(), store, orgID, []PostableTag{
			{Name: "team/blr/platform"},
			{Name: "DATABASE"},
			{Name: "Brand-New"},
		}, "u@signoz.io")
		require.NoError(t, err)

		createdInternalNames := []string{}
		createdNames := map[string]string{}
		for _, tg := range toCreate {
			createdInternalNames = append(createdInternalNames, tg.InternalName)
			createdNames[tg.InternalName] = tg.Name
		}
		assert.ElementsMatch(t, []string{"team::blr::platform", "brand-new"}, createdInternalNames,
			"only the two missing tags should be returned for insertion")
		assert.Equal(t, "team/BLR/platform", createdNames["team::blr::platform"], "should inherit casing from existing parent")
		assert.Equal(t, "Brand-New", createdNames["brand-new"], "should keep input casing when no existing match")

		require.Len(t, matched, 1, "DATABASE should hit the existing 'Database' tag")
		assert.Same(t, dbTag2, matched[0], "matched should return the existing pointer with its authoritative ID")
	})

	t.Run("dedupes inputs that map to the same internal name", func(t *testing.T) {
		orgID := valuer.GenerateUUID()
		store := &fakeStore{}

		toCreate, matched, err := Resolve(context.Background(), store, orgID, []PostableTag{
			{Name: "Foo/Bar"},
			{Name: "foo/bar"},
			{Name: "FOO/BAR"},
		}, "u@signoz.io")
		require.NoError(t, err)

		require.Empty(t, matched)
		require.Len(t, toCreate, 1, "duplicate inputs must collapse into a single insert")
		assert.Equal(t, "Foo/Bar", toCreate[0].Name)
		assert.Equal(t, "foo::bar", toCreate[0].InternalName)
	})

	t.Run("propagates validation error from any input", func(t *testing.T) {
		store := &fakeStore{}
		_, _, err := Resolve(context.Background(), store, valuer.GenerateUUID(), []PostableTag{
			{Name: "valid"},
			{Name: ""},
		}, "u@signoz.io")
		require.Error(t, err)
	})
}
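TestResolve and the fake store together document the split of responsibilities: Resolve validates, dedupes, and classifies names against a single List call, while persistence stays with the caller. A hypothetical caller is sketched below against only the Store methods the fake stubs; attachTags itself, the Tag.ID field, the Store interface name, and the TagRelation literal are assumptions for illustration, not this package's confirmed API.

// Hypothetical caller sketch; only the Store methods implemented by fakeStore
// above are used, and the TagRelation field names are assumed.
func attachTags(ctx context.Context, store Store, orgID, entityID valuer.UUID, input []PostableTag, createdBy string) error {
	toCreate, matched, err := Resolve(ctx, store, orgID, input, createdBy)
	if err != nil {
		return err
	}
	created, err := store.Create(ctx, toCreate) // insert only the missing tags
	if err != nil {
		return err
	}
	all := append(created, matched...) // matched rows keep their authoritative IDs
	relations := make([]*TagRelation, 0, len(all))
	keep := make([]valuer.UUID, 0, len(all))
	for _, tg := range all {
		relations = append(relations, &TagRelation{TagID: tg.ID, EntityID: entityID}) // assumed fields
		keep = append(keep, tg.ID)
	}
	if err := store.CreateRelations(ctx, relations); err != nil {
		return err
	}
	// Drop stale links so the entity ends up with exactly this tag set.
	return store.DeleteRelationsExcept(ctx, entityID, keep)
}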
@@ -1,191 +0,0 @@
import json
from collections.abc import Callable
from http import HTTPStatus
from pathlib import Path

import requests

from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.types import Operation, SigNoz

_PERSES_FIXTURE = (
    Path(__file__).parents[4]
    / "pkg/types/dashboardtypes/dashboardtypesv2/testdata/perses.json"
)


def _post_dashboard(signoz: SigNoz, token: str, body: dict) -> requests.Response:
    return requests.post(
        signoz.self.host_configs["8080"].get("/api/v2/dashboards"),
        json=body,
        headers={"Authorization": f"Bearer {token}"},
        timeout=2,
    )


def test_empty_body_rejected_for_missing_schema_version(
    signoz: SigNoz,
    create_user_admin: Operation,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
):
    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    response = _post_dashboard(signoz, admin_token, {})

    assert response.status_code == HTTPStatus.BAD_REQUEST
    body = response.json()
    assert body["status"] == "error"
    assert body["error"]["code"] == "dashboard_invalid_input"
    assert body["error"]["message"] == 'metadata.schemaVersion must be "v6", got ""'


def test_missing_display_name_rejected(
    signoz: SigNoz,
    create_user_admin: Operation,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
):
    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    response = _post_dashboard(signoz, admin_token, {"metadata": {"schemaVersion": "v6"}})

    assert response.status_code == HTTPStatus.BAD_REQUEST
    body = response.json()
    assert body["status"] == "error"
    assert body["error"]["code"] == "dashboard_invalid_input"
    assert body["error"]["message"] == "data.display.name is required"


def test_minimal_valid_body_creates_dashboard(
    signoz: SigNoz,
    create_user_admin: Operation,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
):
    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    response = _post_dashboard(
        signoz,
        admin_token,
        {
            "metadata": {"schemaVersion": "v6"},
            "data": {"display": {"name": "test name"}},
        },
    )

    assert response.status_code == HTTPStatus.CREATED
    body = response.json()
    assert body["status"] == "success"
    data = body["data"]
    assert data["info"]["data"]["display"]["name"] == "test name"
    assert data["info"]["metadata"]["schemaVersion"] == "v6"


def test_unknown_root_field_rejected(
    signoz: SigNoz,
    create_user_admin: Operation,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
):
    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    response = _post_dashboard(
        signoz,
        admin_token,
        {
            "metadata": {"schemaVersion": "v6"},
            "data": {"display": {"name": "test name"}},
            "unknownfieldattheroot": "shouldgiveanerror",
        },
    )

    assert response.status_code == HTTPStatus.BAD_REQUEST
    body = response.json()
    assert body["status"] == "error"
    assert body["error"]["code"] == "dashboard_invalid_input"
    assert body["error"]["message"] == 'json: unknown field "unknownfieldattheroot"'


def test_unknown_nested_field_rejected(
    signoz: SigNoz,
    create_user_admin: Operation,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
):
    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    response = _post_dashboard(
        signoz,
        admin_token,
        {
            "metadata": {"schemaVersion": "v6"},
            "data": {
                "display": {
                    "name": "test name",
                    "unknownfieldinside": "shouldgiveanerror",
                },
            },
        },
    )

    assert response.status_code == HTTPStatus.BAD_REQUEST
    body = response.json()
    assert body["status"] == "error"
    assert body["error"]["code"] == "dashboard_invalid_input"
    assert body["error"]["message"] == 'json: unknown field "unknownfieldinside"'


def test_perses_fixture_creates_dashboard(
    signoz: SigNoz,
    create_user_admin: Operation,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
):
    """The perses.json fixture is the kitchen-sink dashboard the schema tests
    use; round-tripping it through the create API exercises the full plugin
    surface end-to-end."""
    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    data = json.loads(_PERSES_FIXTURE.read_text())
    response = _post_dashboard(
        signoz,
        admin_token,
        {"metadata": {"schemaVersion": "v6"}, "data": data},
    )

    assert response.status_code == HTTPStatus.CREATED
    body = response.json()
    assert body["status"] == "success"
    assert body["data"]["info"]["data"]["display"]["name"] == data["display"]["name"]


def test_tag_casing_is_inherited_from_existing_parent(
    signoz: SigNoz,
    create_user_admin: Operation,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
):
    """A second dashboard tagged with a sibling under a casing-variant parent
    path should adopt the existing parent's casing while keeping the
    user-supplied casing for the new leaf segment."""
    admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    first = _post_dashboard(
        signoz,
        admin_token,
        {
            "metadata": {"schemaVersion": "v6"},
            "data": {"display": {"name": "dac"}},
            "tags": [{"name": "engineering/US/NYC"}],
        },
    )
    assert first.status_code == HTTPStatus.CREATED
    first_tags = first.json()["data"]["info"]["tags"]
    assert first_tags == [{"name": "engineering/US/NYC"}]

    second = _post_dashboard(
        signoz,
        admin_token,
        {
            "metadata": {"schemaVersion": "v6"},
            "data": {"display": {"name": "dac"}},
            "tags": [{"name": "engineering/us/SF"}],
        },
    )
    assert second.status_code == HTTPStatus.CREATED
    second_tags = second.json()["data"]["info"]["tags"]
    assert second_tags == [{"name": "engineering/US/SF"}]