Mirror of https://github.com/SigNoz/signoz.git, synced 2026-04-30 15:40:27 +01:00.

Compare commits: `infraM/v2_...traceop`

2 commits: `2b9e383994`, `f4e5534e53`
```diff
@@ -104,7 +104,12 @@ export const usePanelContextMenu = ({
 			}

 			if (data && data?.record?.queryName) {
-				onClick(data.coord, { ...data.record, label: data.label, timeRange });
+				onClick(data.coord, {
+					...data.record,
+					label: data.label,
+					seriesColor: data.seriesColor,
+					timeRange,
+				});
 			}
 		},
 		[onClick, queryResponse],
```
```diff
@@ -196,6 +196,7 @@ export const getUplotClickData = ({
 	coord: { x: number; y: number };
 	record: { queryName: string; filters: FilterData[] };
 	label: string | React.ReactNode;
+	seriesColor?: string;
 } | null => {
 	if (!queryData?.queryName || !metric) {
 		return null;
@@ -208,6 +209,8 @@ export const getUplotClickData = ({

 	// Generate label from focusedSeries data
 	let label: string | React.ReactNode = '';
+	const seriesColor = focusedSeries?.color;
+
 	if (focusedSeries && focusedSeries.seriesName) {
 		label = (
 			<span style={{ color: focusedSeries.color }}>
@@ -223,6 +226,7 @@ export const getUplotClickData = ({
 		},
 		record,
 		label,
+		seriesColor,
 	};
 };
@@ -237,6 +241,7 @@ export const getPieChartClickData = (
 	queryName: string;
 	filters: FilterData[];
 	label: string | React.ReactNode;
+	seriesColor?: string;
 } | null => {
 	const { metric, queryName } = arc.data.record;
 	if (!queryName || !metric) {
@@ -248,6 +253,7 @@ export const getPieChartClickData = (
 		queryName,
 		filters: getFiltersFromMetric(metric), // TODO: add where clause query as well.
 		label,
+		seriesColor: arc.data.color,
 	};
 };
```
```diff
@@ -22,6 +22,7 @@ export interface AggregateData {
 		endTime: number;
 	};
 	label?: string | React.ReactNode;
+	seriesColor?: string;
 }

 const useAggregateDrilldown = ({
```
```diff
@@ -228,7 +228,13 @@ const useBaseAggregateOptions = ({
 	return (
 		<ContextMenu.Item
 			key={key}
-			icon={isLoading ? <LoadingOutlined spin /> : icon}
+			icon={
+				isLoading ? (
+					<LoadingOutlined spin />
+				) : (
+					<span style={{ color: aggregateData?.seriesColor }}>{icon}</span>
+				)
+			}
 			onClick={(): void => onClick()}
 			disabled={isLoading}
 		>
```
```diff
@@ -4,7 +4,7 @@
 	gap: 8px;
 	padding: 8px;
 	cursor: pointer;
-	color: var(--foreground);
+	color: var(--muted-foreground);
 	font-family: Inter;
 	font-size: var(--font-size-sm);
 	font-weight: 600;
@@ -20,13 +20,10 @@
 	overflow: hidden;
 	text-overflow: ellipsis;

-	&:hover {
-		background-color: var(--l1-background);
-	}
-
 	&:hover,
 	&:focus {
 		outline: none;
-		background-color: var(--l1-background);
+		background-color: var(--l2-background-hover);
 	}

 	&.disabled {
@@ -47,7 +44,8 @@
 	}

 	&:hover {
-		background-color: var(--bg-cherry-100);
+		background-color: var(--danger-background);
+		color: var(--l1-foreground);
 	}
 }
@@ -74,73 +72,24 @@
 }

 .context-menu-header {
-	padding-bottom: 4px;
-	border-bottom: 1px solid var(--l1-border);
+	padding: 8px 12px;
+	border-bottom: 1px solid var(--l2-border);
 	color: var(--muted-foreground);
 }

 // Target the popover inner specifically for context menu
 .context-menu .ant-popover-inner {
-	padding: 12px 8px !important;
-	// max-height: 254px !important;
-	max-width: 300px !important;
+	padding: 0;
+	border-radius: 6px;
+	max-width: 300px;
+	background: var(--l2-background) !important;
+	border: 1px solid var(--l2-border) !important;
 }

-// Dark mode support
-.darkMode {
-	.context-menu-item {
-		color: var(--muted-foreground);
-
-		&:hover,
-		&:focus {
-			background-color: var(--l2-background);
-		}
-
-		&.danger {
-			color: var(--bg-cherry-400);
-
-			.icon {
-				color: var(--bg-cherry-400);
-			}
-
-			&:hover {
-				background-color: var(--danger-background);
-				color: var(--l1-foreground);
-			}
-		}
-
-		.icon {
-			color: var(--bg-robin-400);
-		}
-	}
-
-	.context-menu-header {
-		border-bottom: 1px solid var(--l1-border);
-		color: var(--muted-foreground);
-	}
-
-	// Set the menu popover background
-	.context-menu .ant-popover-inner {
-		background: var(--l1-background) !important;
-		border: 1px solid var(--border) !important;
-	}
-}

 // Context menu backdrop overlay
 .context-menu-backdrop {
 	position: fixed;
-	top: 0;
-	left: 0;
-	width: 100vw;
-	height: 100vh;
+	inset: 0;
 	z-index: 9999;
 	background: transparent;
 	cursor: default;

 	// Prevent any pointer events from reaching elements behind
 	pointer-events: auto;
-
-	// Ensure it covers the entire viewport including any scrollable areas
-	position: fixed !important;
-	inset: 0;
 }
```
```diff
@@ -48,23 +48,5 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
 		return err
 	}

-	if err := router.Handle("/api/v2/infra_monitoring/onboarding", handler.New(
-		provider.authZ.ViewAccess(provider.infraMonitoringHandler.GetOnboarding),
-		handler.OpenAPIDef{
-			ID:          "GetOnboarding",
-			Tags:        []string{"inframonitoring"},
-			Summary:     "Get Onboarding Status for Infra Monitoring",
-			Description: "Returns the per-tab readiness of the infra-monitoring section selected by the 'type' query parameter (hosts, processes, pods, nodes, deployments, daemonsets, statefulsets, jobs, namespaces, clusters, volumes). For each collector receiver or processor that contributes required metrics or attributes, lists what is present and what is missing, with a prebuilt user-facing message and a docs link per missing component. Default-enabled metrics are those expected as soon as the receiver is configured; optional metrics require 'enabled: true' in receiver config. 'ready' is true only when every missing list is empty.",
-			RequestQuery:        new(inframonitoringtypes.PostableOnboarding),
-			Response:            new(inframonitoringtypes.Onboarding),
-			ResponseContentType: "application/json",
-			SuccessStatusCode:   http.StatusOK,
-			ErrorStatusCodes:    []int{http.StatusBadRequest, http.StatusUnauthorized},
-			Deprecated:          false,
-			SecuritySchemes:     newSecuritySchemes(types.RoleViewer),
-		})).Methods(http.MethodGet).GetError(); err != nil {
-		return err
-	}
-
 	return nil
 }
```
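For context, a minimal sketch of exercising this endpoint (present on the left side of the comparison) from Go. The path and `type` query parameter come from the registration above; the host, port, auth header, and JSON key names are assumptions, since the struct tags of `inframonitoringtypes.Onboarding` are not shown in this diff.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Endpoint and query parameter taken from the route registration above;
	// host and port are placeholders for a local deployment.
	req, err := http.NewRequest(http.MethodGet,
		"http://localhost:8080/api/v2/infra_monitoring/onboarding?type=hosts", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <token>") // hypothetical auth scheme

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode loosely into a map: the exact JSON keys depend on struct tags
	// that this diff does not show.
	var body map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", body)
}
```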
```diff
@@ -22,30 +22,6 @@ func NewHandler(m inframonitoring.Module) inframonitoring.Handler {
 	}
 }

-func (h *handler) GetOnboarding(rw http.ResponseWriter, req *http.Request) {
-	claims, err := authtypes.ClaimsFromContext(req.Context())
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	orgID := valuer.MustNewUUID(claims.OrgID)
-
-	var parsedReq inframonitoringtypes.PostableOnboarding
-	if err := binding.Query.BindQuery(req.URL.Query(), &parsedReq); err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	result, err := h.module.GetOnboarding(req.Context(), orgID, &parsedReq)
-	if err != nil {
-		render.Error(rw, err)
-		return
-	}
-
-	render.Success(rw, http.StatusOK, result)
-}
-
 func (h *handler) ListHosts(rw http.ResponseWriter, req *http.Request) {
 	claims, err := authtypes.ClaimsFromContext(req.Context())
 	if err != nil {
```
```diff
@@ -434,57 +434,6 @@ func (m *module) getMetricsExistenceAndEarliestTime(ctx context.Context, metricN
 	return missingMetrics, globalMinFirstReported, nil
 }

-// getAttributesExistence returns the subset of attrNames that are missing —
-// i.e. have never been reported as a label on any of the given metricNames.
-// Presence is checked against distributed_metadata without a time-range filter.
-func (m *module) getAttributesExistence(ctx context.Context, metricNames, attrNames []string) ([]string, error) {
-	if len(attrNames) == 0 {
-		return nil, nil
-	}
-	if len(metricNames) == 0 {
-		return nil, errors.NewInternalf(errors.CodeInternal, "getAttributesExistence: metricNames must not be empty")
-	}
-	sb := sqlbuilder.NewSelectBuilder()
-	sb.Select("attr_name", "count(*) AS cnt")
-	sb.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, telemetrymetrics.AttributesMetadataTableName))
-	sb.Where(
-		sb.In("metric_name", sqlbuilder.List(metricNames)),
-		sb.In("attr_name", sqlbuilder.List(attrNames)),
-	)
-	sb.GroupBy("attr_name")
-
-	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
-
-	rows, err := m.telemetryStore.ClickhouseDB().Query(ctx, query, args...)
-	if err != nil {
-		return nil, err
-	}
-	defer rows.Close()
-
-	present := make(map[string]bool, len(attrNames))
-	for rows.Next() {
-		var name string
-		var cnt uint64
-		if err := rows.Scan(&name, &cnt); err != nil {
-			return nil, err
-		}
-		if name != "" && cnt > 0 {
-			present[name] = true
-		}
-	}
-	if err := rows.Err(); err != nil {
-		return nil, err
-	}
-
-	var missing []string
-	for _, a := range attrNames {
-		if !present[a] {
-			missing = append(missing, a)
-		}
-	}
-	return missing, nil
-}
-
 // getMetadata fetches the latest values of additionalCols for each unique combination of groupBy keys,
 // within the given time range and metric names. It uses argMax(tuple(...), unix_milli) to ensure
 // we always pick attribute values from the latest timestamp for each group.
```
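For illustration, the existence check above renders to roughly the following statement. The database and table names come from `telemetrymetrics.DBName` and `telemetrymetrics.AttributesMetadataTableName`, which the doc comment suggests resolve to the distributed metadata table; the literal names below are assumptions.

```go
// Assumed rendering of getAttributesExistence's builder against the
// ClickHouse flavor, with two metric names and two attribute names bound.
// sqlbuilder.List flattens each slice into one placeholder per element.
const attrsExistenceSQL = `
SELECT attr_name, count(*) AS cnt
FROM signoz_metrics.distributed_metadata
WHERE metric_name IN (?, ?) AND attr_name IN (?, ?)
GROUP BY attr_name`
```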
```diff
@@ -1,7 +1,5 @@
 package implinframonitoring

-import "github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
-
 // The types in this file are only used within the implinframonitoring package, and are not exposed outside.
 // They are primarily used for internal processing and structuring of data within the module's implementation.
@@ -25,50 +23,3 @@ type podPhaseCounts struct {
 	Failed  int
 	Unknown int
 }
-
-// bucketSplit carries the up-to-six entries a single spec bucket contributes
-// to an onboarding response. Any field may be nil if the bucket doesn't
-// populate that dimension.
-type bucketSplit struct {
-	PresentDefault  *inframonitoringtypes.MetricsComponentEntry
-	PresentOptional *inframonitoringtypes.MetricsComponentEntry
-	PresentAttrs    *inframonitoringtypes.AttributesComponentEntry
-	MissingDefault  *inframonitoringtypes.MissingMetricsComponentEntry
-	MissingOptional *inframonitoringtypes.MissingMetricsComponentEntry
-	MissingAttrs    *inframonitoringtypes.MissingAttributesComponentEntry
-}
-
-// onboardingComponentBucket is a single collector component's contribution
-// toward a single infra-monitoring tab's readiness. Any of the three dimension
-// slices (DefaultMetrics, OptionalMetrics, RequiredAttrs) may be empty — the
-// bucketizer in Phase 4 skips empty dimensions.
-type onboardingComponentBucket struct {
-	Component         inframonitoringtypes.AssociatedComponent
-	DefaultMetrics    []string
-	OptionalMetrics   []string
-	RequiredAttrs     []string
-	DocumentationLink string
-}
-
-// onboardingSpec defines, for one OnboardingType, the full set of
-// component-scoped buckets that must be satisfied for the tab to be ready.
-type onboardingSpec struct {
-	Buckets []onboardingComponentBucket
-}
-
-func (s onboardingSpec) getAllMetrics() []string {
-	var out []string
-	for _, b := range s.Buckets {
-		out = append(out, b.DefaultMetrics...)
-		out = append(out, b.OptionalMetrics...)
-	}
-	return out
-}
-
-func (s onboardingSpec) getAllAttrs() []string {
-	var out []string
-	for _, b := range s.Buckets {
-		out = append(out, b.RequiredAttrs...)
-	}
-	return out
-}
```
```diff
@@ -47,80 +47,6 @@ func NewModule(
 	}
 }

-// GetOnboarding runs a per-type readiness check: for the requested
-// infra-monitoring tab, reports which required metrics and attributes are
-// present vs missing, grouped by the collector component that produces them.
-// Ready is true iff every missing list is empty.
-func (m *module) GetOnboarding(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableOnboarding) (*inframonitoringtypes.Onboarding, error) {
-	if err := req.Validate(); err != nil {
-		return nil, err
-	}
-
-	spec, err := getSpecForType(req.Type)
-	if err != nil {
-		return nil, err
-	}
-
-	allMetrics := spec.getAllMetrics()
-	allAttrs := spec.getAllAttrs()
-
-	missingMetricsList, _, err := m.getMetricsExistenceAndEarliestTime(ctx, allMetrics)
-	if err != nil {
-		return nil, err
-	}
-	missingMetricsMap := make(map[string]bool, len(missingMetricsList))
-	for _, name := range missingMetricsList {
-		missingMetricsMap[name] = true
-	}
-
-	missingAttrsList, err := m.getAttributesExistence(ctx, allMetrics, allAttrs)
-	if err != nil {
-		return nil, err
-	}
-	missingAttrsMap := make(map[string]bool, len(missingAttrsList))
-	for _, name := range missingAttrsList {
-		missingAttrsMap[name] = true
-	}
-
-	resp := &inframonitoringtypes.Onboarding{
-		Type:                         req.Type,
-		PresentDefaultEnabledMetrics: []inframonitoringtypes.MetricsComponentEntry{},
-		PresentOptionalMetrics:       []inframonitoringtypes.MetricsComponentEntry{},
-		PresentRequiredAttributes:    []inframonitoringtypes.AttributesComponentEntry{},
-		MissingDefaultEnabledMetrics: []inframonitoringtypes.MissingMetricsComponentEntry{},
-		MissingOptionalMetrics:       []inframonitoringtypes.MissingMetricsComponentEntry{},
-		MissingRequiredAttributes:    []inframonitoringtypes.MissingAttributesComponentEntry{},
-	}
-
-	for _, b := range spec.Buckets {
-		s := splitBucket(b, missingMetricsMap, missingAttrsMap)
-		if s.PresentDefault != nil {
-			resp.PresentDefaultEnabledMetrics = append(resp.PresentDefaultEnabledMetrics, *s.PresentDefault)
-		}
-		if s.PresentOptional != nil {
-			resp.PresentOptionalMetrics = append(resp.PresentOptionalMetrics, *s.PresentOptional)
-		}
-		if s.PresentAttrs != nil {
-			resp.PresentRequiredAttributes = append(resp.PresentRequiredAttributes, *s.PresentAttrs)
-		}
-		if s.MissingDefault != nil {
-			resp.MissingDefaultEnabledMetrics = append(resp.MissingDefaultEnabledMetrics, *s.MissingDefault)
-		}
-		if s.MissingOptional != nil {
-			resp.MissingOptionalMetrics = append(resp.MissingOptionalMetrics, *s.MissingOptional)
-		}
-		if s.MissingAttrs != nil {
-			resp.MissingRequiredAttributes = append(resp.MissingRequiredAttributes, *s.MissingAttrs)
-		}
-	}
-
-	resp.Ready = len(resp.MissingDefaultEnabledMetrics) == 0 &&
-		len(resp.MissingOptionalMetrics) == 0 &&
-		len(resp.MissingRequiredAttributes) == 0
-
-	return resp, nil
-}
-
 func (m *module) ListHosts(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableHosts) (*inframonitoringtypes.Hosts, error) {
 	if err := req.Validate(); err != nil {
 		return nil, err
```
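As a worked example of the readiness rule, a hedged sketch that drives the `splitBucket` helper (defined further down this diff) with the hosts hostmetrics bucket and one never-reported metric. The expected message follows from `buildMissingDefaultMetricsMessage` and matches the template asserted in the tests below.

```go
// Sketch; assumes the implinframonitoring package, where splitBucket and the
// component/doc-link constants shown later in this diff are defined.
func demoMissingDefault() {
	s := splitBucket(onboardingComponentBucket{
		Component:         componentHostMetricsReceiver,
		DefaultMetrics:    []string{"system.cpu.time"},
		DocumentationLink: docLinkHostMetricsReceiver,
	}, map[string]bool{"system.cpu.time": true}, map[string]bool{})

	// Only the MissingDefault partition is populated, so GetOnboarding would
	// append one missing entry and leave Ready = false.
	fmt.Println(s.MissingDefault.Message)
	// Missing default metrics system.cpu.time from hostmetricsreceiver. Learn how to configure here.
}
```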
```diff
@@ -1,114 +0,0 @@
-package implinframonitoring
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/SigNoz/signoz/pkg/errors"
-	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
-)
-
-// splitBucket partitions one component bucket's metric and attribute lists
-// against the module-wide missing sets into up to six response entries.
-// Empty partitions are left nil so callers can skip them.
-func splitBucket(b onboardingComponentBucket, missingMetrics, missingAttrs map[string]bool) bucketSplit {
-	var s bucketSplit
-	presentDef, missDef := partitionList(b.DefaultMetrics, missingMetrics)
-	if len(presentDef) > 0 {
-		s.PresentDefault = &inframonitoringtypes.MetricsComponentEntry{
-			Metrics:             presentDef,
-			AssociatedComponent: b.Component,
-		}
-	}
-	if len(missDef) > 0 {
-		s.MissingDefault = &inframonitoringtypes.MissingMetricsComponentEntry{
-			MetricsComponentEntry: inframonitoringtypes.MetricsComponentEntry{
-				Metrics:             missDef,
-				AssociatedComponent: b.Component,
-			},
-			Message:           buildMissingDefaultMetricsMessage(missDef, b.Component.Name),
-			DocumentationLink: b.DocumentationLink,
-		}
-	}
-
-	presentOpt, missOpt := partitionList(b.OptionalMetrics, missingMetrics)
-	if len(presentOpt) > 0 {
-		s.PresentOptional = &inframonitoringtypes.MetricsComponentEntry{
-			Metrics:             presentOpt,
-			AssociatedComponent: b.Component,
-		}
-	}
-	if len(missOpt) > 0 {
-		s.MissingOptional = &inframonitoringtypes.MissingMetricsComponentEntry{
-			MetricsComponentEntry: inframonitoringtypes.MetricsComponentEntry{
-				Metrics:             missOpt,
-				AssociatedComponent: b.Component,
-			},
-			Message:           buildMissingOptionalMetricsMessage(missOpt, b.Component.Name),
-			DocumentationLink: b.DocumentationLink,
-		}
-	}
-
-	presentA, missA := partitionList(b.RequiredAttrs, missingAttrs)
-	if len(presentA) > 0 {
-		s.PresentAttrs = &inframonitoringtypes.AttributesComponentEntry{
-			Attributes:          presentA,
-			AssociatedComponent: b.Component,
-		}
-	}
-	if len(missA) > 0 {
-		s.MissingAttrs = &inframonitoringtypes.MissingAttributesComponentEntry{
-			AttributesComponentEntry: inframonitoringtypes.AttributesComponentEntry{
-				Attributes:          missA,
-				AssociatedComponent: b.Component,
-			},
-			Message:           buildMissingRequiredAttrsMessage(missA, b.Component.Name),
-			DocumentationLink: b.DocumentationLink,
-		}
-	}
-
-	return s
-}
-
-// getSpecForType returns the onboardingSpec for a given OnboardingType, or an error if the type is invalid.
-func getSpecForType(t inframonitoringtypes.OnboardingType) (*onboardingSpec, error) {
-	spec, ok := onboardingSpecs[t]
-	if !ok {
-		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "no onboarding spec for type: %s", t)
-	}
-	return &spec, nil
-}
-
-// partitionList splits items into those NOT in `missing` and those in `missing`.
-// Preserves input order.
-func partitionList(items []string, missing map[string]bool) (present, miss []string) {
-	for _, x := range items {
-		if missing[x] {
-			miss = append(miss, x)
-		} else {
-			present = append(present, x)
-		}
-	}
-	return present, miss
-}
-
-func buildMissingDefaultMetricsMessage(metrics []string, componentName string) string {
-	return fmt.Sprintf(
-		"Missing default metrics %s from %s. Learn how to configure here.",
-		strings.Join(metrics, ", "), componentName,
-	)
-}
-
-func buildMissingOptionalMetricsMessage(metrics []string, componentName string) string {
-	return fmt.Sprintf(
-		"Missing optional metrics %s from %s. Learn how to enable here.",
-		strings.Join(metrics, ", "), componentName,
-	)
-}
-
-func buildMissingRequiredAttrsMessage(attrs []string, componentName string) string {
-	return fmt.Sprintf(
-		"Missing required attributes %s from %s. Learn how to configure here.",
-		strings.Join(attrs, ", "), componentName,
-	)
-}
```
```diff
@@ -1,364 +0,0 @@
-package implinframonitoring
-
-import "github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
-
-// Component names — the 5 OTel collector receivers/processors that produce
-// metrics and resource attributes consumed by infra-monitoring tabs. Bare
-// strings on purpose (not wrapped enums) — the list is open-ended enough that
-// an enum adds more friction than value.
-const (
-	componentNameHostMetricsReceiver        = "hostmetricsreceiver"
-	componentNameKubeletStatsReceiver       = "kubeletstatsreceiver"
-	componentNameK8sClusterReceiver         = "k8sclusterreceiver"
-	componentNameResourceDetectionProcessor = "resourcedetectionprocessor"
-	componentNameK8sAttributesProcessor     = "k8sattributesprocessor"
-)
-
-// Documentation links — one per component. User-facing; emitted on missing-entries.
-const (
-	docLinkHostMetricsReceiver        = "https://signoz.io/docs/infrastructure-monitoring/hostmetrics/#step-2-configure-the-collector"
-	docLinkKubeletStatsReceiver       = "https://signoz.io/docs/infrastructure-monitoring/k8s-metrics/#setting-up-kubelet-stats-monitoring"
-	docLinkK8sClusterReceiver         = "https://signoz.io/docs/infrastructure-monitoring/k8s-metrics/#setting-up-k8s-cluster-monitoring"
-	docLinkResourceDetectionProcessor = "https://signoz.io/docs/infrastructure-monitoring/hostmetrics/#host-name-is-blankempty"
-	docLinkK8sAttributesProcessor     = "https://signoz.io/docs/infrastructure-monitoring/k8s-metrics/#2-enable-kubernetes-metadata"
-)
-
-var (
-	componentHostMetricsReceiver = inframonitoringtypes.AssociatedComponent{
-		Type: inframonitoringtypes.OnboardingComponentTypeReceiver,
-		Name: componentNameHostMetricsReceiver,
-	}
-	componentKubeletStatsReceiver = inframonitoringtypes.AssociatedComponent{
-		Type: inframonitoringtypes.OnboardingComponentTypeReceiver,
-		Name: componentNameKubeletStatsReceiver,
-	}
-	componentK8sClusterReceiver = inframonitoringtypes.AssociatedComponent{
-		Type: inframonitoringtypes.OnboardingComponentTypeReceiver,
-		Name: componentNameK8sClusterReceiver,
-	}
-	componentResourceDetectionProcessor = inframonitoringtypes.AssociatedComponent{
-		Type: inframonitoringtypes.OnboardingComponentTypeProcessor,
-		Name: componentNameResourceDetectionProcessor,
-	}
-	componentK8sAttributesProcessor = inframonitoringtypes.AssociatedComponent{
-		Type: inframonitoringtypes.OnboardingComponentTypeProcessor,
-		Name: componentNameK8sAttributesProcessor,
-	}
-)
-
-// onboardingSpecs is the single lookup table the module consults for a type's
-// readiness contract. Every OnboardingType value must have an entry here.
-var onboardingSpecs = map[inframonitoringtypes.OnboardingType]onboardingSpec{
-	inframonitoringtypes.OnboardingTypeHosts:        hostsSpec,
-	inframonitoringtypes.OnboardingTypeProcesses:    processesSpec,
-	inframonitoringtypes.OnboardingTypePods:         podsSpec,
-	inframonitoringtypes.OnboardingTypeNodes:        nodesSpec,
-	inframonitoringtypes.OnboardingTypeDeployments:  deploymentsSpec,
-	inframonitoringtypes.OnboardingTypeDaemonsets:   daemonsetsSpec,
-	inframonitoringtypes.OnboardingTypeStatefulsets: statefulsetsSpec,
-	inframonitoringtypes.OnboardingTypeJobs:         jobsSpec,
-	inframonitoringtypes.OnboardingTypeNamespaces:   namespacesSpec,
-	inframonitoringtypes.OnboardingTypeClusters:     clustersSpec,
-	inframonitoringtypes.OnboardingTypeVolumes:      volumesSpec,
-}
-
-// Per-type specs. Every metric and attribute is spelled out in its own spec
-// on purpose — no shared slices, no concatenation helpers. Repetition is
-// cheaper than indirection when auditing what each tab actually requires.
-
-var hostsSpec = onboardingSpec{
-	Buckets: []onboardingComponentBucket{
-		{
-			Component: componentHostMetricsReceiver,
-			DefaultMetrics: []string{
-				"system.cpu.time",
-				"system.memory.usage",
-				"system.cpu.load_average.15m",
-				"system.filesystem.usage",
-			},
-			DocumentationLink: docLinkHostMetricsReceiver,
-		},
-		{
-			Component:         componentResourceDetectionProcessor,
-			RequiredAttrs:     []string{"host.name"},
-			DocumentationLink: docLinkResourceDetectionProcessor,
-		},
-	},
-}
-
-var processesSpec = onboardingSpec{
-	Buckets: []onboardingComponentBucket{
-		{
-			Component: componentHostMetricsReceiver,
-			DefaultMetrics: []string{
-				"process.cpu.time",
-				"process.memory.usage",
-			},
-			RequiredAttrs:     []string{"process.pid"},
-			DocumentationLink: docLinkHostMetricsReceiver,
-		},
-	},
-}
-
-var podsSpec = onboardingSpec{
-	Buckets: []onboardingComponentBucket{
-		{
-			Component: componentKubeletStatsReceiver,
-			DefaultMetrics: []string{
-				"k8s.pod.cpu.usage",
-				"k8s.pod.memory.working_set",
-			},
-			OptionalMetrics: []string{
-				"k8s.pod.cpu_request_utilization",
-				"k8s.pod.cpu_limit_utilization",
-				"k8s.pod.memory_request_utilization",
-				"k8s.pod.memory_limit_utilization",
-			},
-			DocumentationLink: docLinkKubeletStatsReceiver,
-		},
-		{
-			Component:         componentK8sClusterReceiver,
-			DefaultMetrics:    []string{"k8s.pod.phase"},
-			DocumentationLink: docLinkK8sClusterReceiver,
-		},
-		{
-			Component:         componentK8sAttributesProcessor,
-			RequiredAttrs:     []string{"k8s.pod.uid"},
-			DocumentationLink: docLinkK8sAttributesProcessor,
-		},
-	},
-}
-
-var nodesSpec = onboardingSpec{
-	Buckets: []onboardingComponentBucket{
-		{
-			Component: componentKubeletStatsReceiver,
-			DefaultMetrics: []string{
-				"k8s.node.cpu.usage",
-				"k8s.node.memory.working_set",
-			},
-			DocumentationLink: docLinkKubeletStatsReceiver,
-		},
-		{
-			Component: componentK8sClusterReceiver,
-			DefaultMetrics: []string{
-				// k8s.node.allocatable_cpu and k8s.node.allocatable_memory are controlled
-				// by the allocatable_types_to_report config option (see
-				// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/4f9a578b210a6dcb9f9bf47942f27208b5765298/receiver/k8sclusterreceiver/metadata.yaml#L805-L806).
-				"k8s.node.allocatable_cpu",
-				"k8s.node.allocatable_memory",
-				// k8s.node.condition_* metrics (k8s.node.condition_ready,
-				// k8s.node.condition_memory_pressure, etc.) are controlled by the
-				// node_conditions_to_report config option. By default, only
-				// k8s.node.condition_ready is enabled (see
-				// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/4f9a578b210a6dcb9f9bf47942f27208b5765298/receiver/k8sclusterreceiver/metadata.yaml#L802).
-				"k8s.node.condition_ready",
-			},
-			DocumentationLink: docLinkK8sClusterReceiver,
-		},
-		{
-			Component:         componentK8sAttributesProcessor,
-			RequiredAttrs:     []string{"k8s.node.name"},
-			DocumentationLink: docLinkK8sAttributesProcessor,
-		},
-	},
-}
-
-var deploymentsSpec = onboardingSpec{
-	Buckets: []onboardingComponentBucket{
-		{
-			Component: componentKubeletStatsReceiver,
-			DefaultMetrics: []string{
-				"k8s.pod.cpu.usage",
-				"k8s.pod.memory.working_set",
-			},
-			OptionalMetrics: []string{
-				"k8s.pod.cpu_request_utilization",
-				"k8s.pod.cpu_limit_utilization",
-				"k8s.pod.memory_request_utilization",
-				"k8s.pod.memory_limit_utilization",
-			},
-			DocumentationLink: docLinkKubeletStatsReceiver,
-		},
-		{
-			Component: componentK8sClusterReceiver,
-			DefaultMetrics: []string{
-				"k8s.container.restarts",
-				"k8s.deployment.desired",
-				"k8s.deployment.available",
-			},
-			DocumentationLink: docLinkK8sClusterReceiver,
-		},
-		{
-			Component:         componentK8sAttributesProcessor,
-			RequiredAttrs:     []string{"k8s.deployment.name"},
-			DocumentationLink: docLinkK8sAttributesProcessor,
-		},
-	},
-}
-
-var daemonsetsSpec = onboardingSpec{
-	Buckets: []onboardingComponentBucket{
-		{
-			Component: componentKubeletStatsReceiver,
-			DefaultMetrics: []string{
-				"k8s.pod.cpu.usage",
-				"k8s.pod.memory.working_set",
-			},
-			OptionalMetrics: []string{
-				"k8s.pod.cpu_request_utilization",
-				"k8s.pod.cpu_limit_utilization",
-				"k8s.pod.memory_request_utilization",
-				"k8s.pod.memory_limit_utilization",
-			},
-			DocumentationLink: docLinkKubeletStatsReceiver,
-		},
-		{
-			Component: componentK8sClusterReceiver,
-			DefaultMetrics: []string{
-				"k8s.container.restarts",
-				"k8s.daemonset.desired_scheduled_nodes",
-				"k8s.daemonset.current_scheduled_nodes",
-			},
-			DocumentationLink: docLinkK8sClusterReceiver,
-		},
-		{
-			Component:         componentK8sAttributesProcessor,
-			RequiredAttrs:     []string{"k8s.daemonset.name"},
-			DocumentationLink: docLinkK8sAttributesProcessor,
-		},
-	},
-}
-
-var statefulsetsSpec = onboardingSpec{
-	Buckets: []onboardingComponentBucket{
-		{
-			Component: componentKubeletStatsReceiver,
-			DefaultMetrics: []string{
-				"k8s.pod.cpu.usage",
-				"k8s.pod.memory.working_set",
-			},
-			OptionalMetrics: []string{
-				"k8s.pod.cpu_request_utilization",
-				"k8s.pod.cpu_limit_utilization",
-				"k8s.pod.memory_request_utilization",
-				"k8s.pod.memory_limit_utilization",
-			},
-			DocumentationLink: docLinkKubeletStatsReceiver,
-		},
-		{
-			Component: componentK8sClusterReceiver,
-			DefaultMetrics: []string{
-				"k8s.container.restarts",
-				"k8s.statefulset.desired_pods",
-				"k8s.statefulset.current_pods",
-			},
-			DocumentationLink: docLinkK8sClusterReceiver,
-		},
-		{
-			Component:         componentK8sAttributesProcessor,
-			RequiredAttrs:     []string{"k8s.statefulset.name"},
-			DocumentationLink: docLinkK8sAttributesProcessor,
-		},
-	},
-}
-
-var jobsSpec = onboardingSpec{
-	Buckets: []onboardingComponentBucket{
-		{
-			Component: componentKubeletStatsReceiver,
-			DefaultMetrics: []string{
-				"k8s.pod.cpu.usage",
-				"k8s.pod.memory.working_set",
-			},
-			OptionalMetrics: []string{
-				"k8s.pod.cpu_request_utilization",
-				"k8s.pod.cpu_limit_utilization",
-				"k8s.pod.memory_request_utilization",
-				"k8s.pod.memory_limit_utilization",
-			},
-			DocumentationLink: docLinkKubeletStatsReceiver,
-		},
-		{
-			Component: componentK8sClusterReceiver,
-			DefaultMetrics: []string{
-				"k8s.container.restarts",
-				"k8s.job.desired_successful_pods",
-				"k8s.job.active_pods",
-				"k8s.job.failed_pods",
-				"k8s.job.successful_pods",
-			},
-			DocumentationLink: docLinkK8sClusterReceiver,
-		},
-		{
-			Component:         componentK8sAttributesProcessor,
-			RequiredAttrs:     []string{"k8s.job.name"},
-			DocumentationLink: docLinkK8sAttributesProcessor,
-		},
-	},
-}
-
-var namespacesSpec = onboardingSpec{
-	Buckets: []onboardingComponentBucket{
-		{
-			Component: componentKubeletStatsReceiver,
-			DefaultMetrics: []string{
-				"k8s.pod.cpu.usage",
-				"k8s.pod.memory.working_set",
-			},
-			DocumentationLink: docLinkKubeletStatsReceiver,
-		},
-		{
-			Component:         componentK8sClusterReceiver,
-			DefaultMetrics:    []string{"k8s.pod.phase"},
-			DocumentationLink: docLinkK8sClusterReceiver,
-		},
-		{
-			Component:         componentK8sAttributesProcessor,
-			RequiredAttrs:     []string{"k8s.namespace.name"},
-			DocumentationLink: docLinkK8sAttributesProcessor,
-		},
-	},
-}
-
-var clustersSpec = onboardingSpec{
-	Buckets: []onboardingComponentBucket{
-		{
-			Component: componentKubeletStatsReceiver,
-			DefaultMetrics: []string{
-				"k8s.node.cpu.usage",
-				"k8s.node.memory.working_set",
-			},
-			DocumentationLink: docLinkKubeletStatsReceiver,
-		},
-		{
-			Component: componentK8sClusterReceiver,
-			DefaultMetrics: []string{
-				// k8s.node.allocatable_cpu and k8s.node.allocatable_memory are controlled
-				// by the allocatable_types_to_report config option (see
-				// https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/4f9a578b210a6dcb9f9bf47942f27208b5765298/receiver/k8sclusterreceiver/metadata.yaml#L805-L806).
-				"k8s.node.allocatable_cpu",
-				"k8s.node.allocatable_memory",
-			},
-			DocumentationLink: docLinkK8sClusterReceiver,
-		},
-		{
-			Component:         componentResourceDetectionProcessor,
-			RequiredAttrs:     []string{"k8s.cluster.name"},
-			DocumentationLink: docLinkResourceDetectionProcessor,
-		},
-	},
-}
-
-var volumesSpec = onboardingSpec{
-	Buckets: []onboardingComponentBucket{
-		{
-			Component: componentKubeletStatsReceiver,
-			DefaultMetrics: []string{
-				"k8s.volume.available",
-				"k8s.volume.capacity",
-				"k8s.volume.inodes",
-				"k8s.volume.inodes.free",
-				"k8s.volume.inodes.used",
-			},
-			DocumentationLink: docLinkKubeletStatsReceiver,
-		},
-		{
-			Component:         componentK8sAttributesProcessor,
-			RequiredAttrs:     []string{"k8s.persistentvolumeclaim.name"},
-			DocumentationLink: docLinkK8sAttributesProcessor,
-		},
-	},
-}
```
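Reading hostsSpec back through the spec accessors makes the hosts contract concrete: four default hostmetrics metrics plus one required resource attribute. A minimal sketch, grounded in the code above and assumed to run inside the same package:

```go
// Assumes the implinframonitoring package (getSpecForType, hostsSpec) and
// imports of fmt and inframonitoringtypes.
func printHostsContract() {
	spec, err := getSpecForType(inframonitoringtypes.OnboardingTypeHosts)
	if err != nil {
		panic(err)
	}
	fmt.Println(spec.getAllMetrics())
	// [system.cpu.time system.memory.usage system.cpu.load_average.15m system.filesystem.usage]
	fmt.Println(spec.getAllAttrs())
	// [host.name]
}
```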
```diff
@@ -1,246 +0,0 @@
-package implinframonitoring
-
-import (
-	"testing"
-
-	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
-	"github.com/stretchr/testify/require"
-)
-
-// Component used across splitBucket cases; the cases don't depend on its
-// type or name.
-var testComponent = inframonitoringtypes.AssociatedComponent{
-	Type: inframonitoringtypes.OnboardingComponentTypeReceiver,
-	Name: "testreceiver",
-}
-
-const testDocLink = "https://example.com/docs"
-
-func TestSplitBucket(t *testing.T) {
-	type want struct {
-		presentDefault  []string
-		presentOptional []string
-		presentAttrs    []string
-		missingDefault  []string
-		missingOptional []string
-		missingAttrs    []string
-	}
-
-	tests := []struct {
-		name           string
-		bucket         onboardingComponentBucket
-		missingMetrics map[string]bool
-		missingAttrs   map[string]bool
-		want           want
-	}{
-		{
-			name:           "empty bucket — nothing to emit",
-			bucket:         onboardingComponentBucket{Component: testComponent, DocumentationLink: testDocLink},
-			missingMetrics: map[string]bool{},
-			missingAttrs:   map[string]bool{},
-			want:           want{},
-		},
-		{
-			name: "all default metrics present",
-			bucket: onboardingComponentBucket{
-				Component:         testComponent,
-				DefaultMetrics:    []string{"m1", "m2"},
-				DocumentationLink: testDocLink,
-			},
-			missingMetrics: map[string]bool{},
-			missingAttrs:   map[string]bool{},
-			want: want{
-				presentDefault: []string{"m1", "m2"},
-			},
-		},
-		{
-			name: "all default metrics missing",
-			bucket: onboardingComponentBucket{
-				Component:         testComponent,
-				DefaultMetrics:    []string{"m1", "m2"},
-				DocumentationLink: testDocLink,
-			},
-			missingMetrics: map[string]bool{"m1": true, "m2": true},
-			missingAttrs:   map[string]bool{},
-			want: want{
-				missingDefault: []string{"m1", "m2"},
-			},
-		},
-		{
-			name: "mixed default metrics",
-			bucket: onboardingComponentBucket{
-				Component:         testComponent,
-				DefaultMetrics:    []string{"m1", "m2", "m3"},
-				DocumentationLink: testDocLink,
-			},
-			missingMetrics: map[string]bool{"m2": true},
-			missingAttrs:   map[string]bool{},
-			want: want{
-				presentDefault: []string{"m1", "m3"},
-				missingDefault: []string{"m2"},
-			},
-		},
-		{
-			name: "only optional metrics — all missing",
-			bucket: onboardingComponentBucket{
-				Component:         testComponent,
-				OptionalMetrics:   []string{"opt1", "opt2"},
-				DocumentationLink: testDocLink,
-			},
-			missingMetrics: map[string]bool{"opt1": true, "opt2": true},
-			missingAttrs:   map[string]bool{},
-			want: want{
-				missingOptional: []string{"opt1", "opt2"},
-			},
-		},
-		{
-			name: "only required attrs — all present",
-			bucket: onboardingComponentBucket{
-				Component:         testComponent,
-				RequiredAttrs:     []string{"a1", "a2"},
-				DocumentationLink: testDocLink,
-			},
-			missingMetrics: map[string]bool{},
-			missingAttrs:   map[string]bool{},
-			want: want{
-				presentAttrs: []string{"a1", "a2"},
-			},
-		},
-		{
-			name: "only required attrs — all missing",
-			bucket: onboardingComponentBucket{
-				Component:         testComponent,
-				RequiredAttrs:     []string{"a1"},
-				DocumentationLink: testDocLink,
-			},
-			missingMetrics: map[string]bool{},
-			missingAttrs:   map[string]bool{"a1": true},
-			want: want{
-				missingAttrs: []string{"a1"},
-			},
-		},
-		{
-			name: "every dimension populated on both sides",
-			bucket: onboardingComponentBucket{
-				Component:         testComponent,
-				DefaultMetrics:    []string{"d1", "d2"},
-				OptionalMetrics:   []string{"o1", "o2"},
-				RequiredAttrs:     []string{"a1", "a2"},
-				DocumentationLink: testDocLink,
-			},
-			missingMetrics: map[string]bool{"d2": true, "o1": true},
-			missingAttrs:   map[string]bool{"a2": true},
-			want: want{
-				presentDefault:  []string{"d1"},
-				missingDefault:  []string{"d2"},
-				presentOptional: []string{"o2"},
-				missingOptional: []string{"o1"},
-				presentAttrs:    []string{"a1"},
-				missingAttrs:    []string{"a2"},
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			got := splitBucket(tt.bucket, tt.missingMetrics, tt.missingAttrs)
-
-			requireMetricsEntry(t, "presentDefault", got.PresentDefault, tt.want.presentDefault, false)
-			requireMetricsEntry(t, "presentOptional", got.PresentOptional, tt.want.presentOptional, false)
-			requireAttrsEntry(t, "presentAttrs", got.PresentAttrs, tt.want.presentAttrs, false)
-
-			requireMissingMetrics(t, "missingDefault", got.MissingDefault, tt.want.missingDefault)
-			requireMissingMetrics(t, "missingOptional", got.MissingOptional, tt.want.missingOptional)
-			requireMissingAttrs(t, "missingAttrs", got.MissingAttrs, tt.want.missingAttrs)
-		})
-	}
-}
-
-func TestPartitionList(t *testing.T) {
-	present, missing := partitionList(
-		[]string{"a", "b", "c", "d"},
-		map[string]bool{"b": true, "d": true},
-	)
-	require.Equal(t, []string{"a", "c"}, present)
-	require.Equal(t, []string{"b", "d"}, missing)
-}
-
-func TestMissingMessageTemplates(t *testing.T) {
-	require.Equal(t,
-		"Missing default metrics m1, m2 from comp. Learn how to configure here.",
-		buildMissingDefaultMetricsMessage([]string{"m1", "m2"}, "comp"),
-	)
-	require.Equal(t,
-		"Missing optional metrics m1 from comp. Learn how to enable here.",
-		buildMissingOptionalMetricsMessage([]string{"m1"}, "comp"),
-	)
-	require.Equal(t,
-		"Missing required attributes a1 from comp. Learn how to configure here.",
-		buildMissingRequiredAttrsMessage([]string{"a1"}, "comp"),
-	)
-	require.Equal(t,
-		"Missing required attributes a1, a2 from comp. Learn how to configure here.",
-		buildMissingRequiredAttrsMessage([]string{"a1", "a2"}, "comp"),
-	)
-}
-
-// TestOnboardingSpecs_CoverAllTypes ensures the spec map has an entry for
-// every OnboardingType — prevents silently shipping an onboarding type that
-// has no spec and would 500 at runtime.
-func TestOnboardingSpecs_CoverAllTypes(t *testing.T) {
-	for _, tp := range inframonitoringtypes.ValidOnboardingTypes {
-		_, ok := onboardingSpecs[tp]
-		require.True(t, ok, "missing onboarding spec for type %s", tp)
-	}
-	require.Len(t, onboardingSpecs, len(inframonitoringtypes.ValidOnboardingTypes))
-}
-
-// --- helpers ---
-
-func requireMetricsEntry(t *testing.T, name string, got *inframonitoringtypes.MetricsComponentEntry, wantMetrics []string, _ bool) {
-	t.Helper()
-	if len(wantMetrics) == 0 {
-		require.Nil(t, got, name)
-		return
-	}
-	require.NotNil(t, got, name)
-	require.Equal(t, wantMetrics, got.Metrics, name)
-	require.Equal(t, testComponent, got.AssociatedComponent, name)
-}
-
-func requireAttrsEntry(t *testing.T, name string, got *inframonitoringtypes.AttributesComponentEntry, wantAttrs []string, _ bool) {
-	t.Helper()
-	if len(wantAttrs) == 0 {
-		require.Nil(t, got, name)
-		return
-	}
-	require.NotNil(t, got, name)
-	require.Equal(t, wantAttrs, got.Attributes, name)
-	require.Equal(t, testComponent, got.AssociatedComponent, name)
-}
-
-func requireMissingMetrics(t *testing.T, name string, got *inframonitoringtypes.MissingMetricsComponentEntry, wantMetrics []string) {
-	t.Helper()
-	if len(wantMetrics) == 0 {
-		require.Nil(t, got, name)
-		return
-	}
-	require.NotNil(t, got, name)
-	require.Equal(t, wantMetrics, got.Metrics, name)
-	require.Equal(t, testComponent, got.AssociatedComponent, name)
-	require.NotEmpty(t, got.Message, name)
-	require.Equal(t, testDocLink, got.DocumentationLink, name)
-}
-
-func requireMissingAttrs(t *testing.T, name string, got *inframonitoringtypes.MissingAttributesComponentEntry, wantAttrs []string) {
-	t.Helper()
-	if len(wantAttrs) == 0 {
-		require.Nil(t, got, name)
-		return
-	}
-	require.NotNil(t, got, name)
-	require.Equal(t, wantAttrs, got.Attributes, name)
-	require.Equal(t, testComponent, got.AssociatedComponent, name)
-	require.NotEmpty(t, got.Message, name)
-	require.Equal(t, testDocLink, got.DocumentationLink, name)
-}
```
```diff
@@ -11,11 +11,9 @@ import (
 type Handler interface {
 	ListHosts(http.ResponseWriter, *http.Request)
 	ListPods(http.ResponseWriter, *http.Request)
-	GetOnboarding(http.ResponseWriter, *http.Request)
 }

 type Module interface {
 	ListHosts(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableHosts) (*inframonitoringtypes.Hosts, error)
 	ListPods(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostablePods) (*inframonitoringtypes.Pods, error)
-	GetOnboarding(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableOnboarding) (*inframonitoringtypes.Onboarding, error)
 }
```
```diff
@@ -413,28 +413,6 @@ func (b *traceOperatorCTEBuilder) buildFinalQuery(ctx context.Context, selectFro
 }

 func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFromCTE string) (*qbtypes.Statement, error) {
-	sb := sqlbuilder.NewSelectBuilder()
-
-	// Select core fields
-	sb.Select(
-		"timestamp",
-		"trace_id",
-		"span_id",
-		"name",
-		"duration_nano",
-		"parent_span_id",
-	)
-
-	selectedFields := map[string]bool{
-		"timestamp":      true,
-		"trace_id":       true,
-		"span_id":        true,
-		"name":           true,
-		"duration_nano":  true,
-		"parent_span_id": true,
-	}
-
 	// Get keys for selectFields
 	keySelectors := b.getKeySelectors()
 	for _, field := range b.operator.SelectFields {
 		keySelectors = append(keySelectors, &telemetrytypes.FieldKeySelector{
@@ -444,13 +422,30 @@ func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFrom
 			FieldDataType: field.FieldDataType,
 		})
 	}

 	keys, _, err := b.stmtBuilder.metadataStore.GetKeysMulti(ctx, keySelectors)
 	if err != nil {
 		return nil, err
 	}

-	// Add selectFields using ColumnExpressionFor since we now have all base table columns
+	coreFields := []string{"trace_id", "span_id", "name", "duration_nano", "parent_span_id"}
+	selectedFields := map[string]bool{
+		"timestamp":      true,
+		"trace_id":       true,
+		"span_id":        true,
+		"name":           true,
+		"duration_nano":  true,
+		"parent_span_id": true,
+	}
+
+	// Inner SELECT reads from the CTE and renames timestamp→ts.
+	// This breaks the `ORDER BY col AS `col`` pattern that triggers a
+	// CH 25.12.5 distributed-analyzer regression (NOT_FOUND_COLUMN_IN_BLOCK /
+	// timestamp renamed to timestamp_0). See ClickHouse/ClickHouse#103508.
+	innerSB := sqlbuilder.NewSelectBuilder()
+	innerSB.Select("timestamp AS ts")
+	innerSB.SelectMore(coreFields...)
+
+	var additionalSelectedFields []string
 	for _, field := range b.operator.SelectFields {
 		if selectedFields[field.Name] {
 			continue
@@ -461,41 +456,60 @@ func (b *traceOperatorCTEBuilder) buildListQuery(ctx context.Context, selectFrom
 				slog.String("field", field.Name), errors.Attr(err))
 			continue
 		}
-		sb.SelectMore(colExpr)
+		innerSB.SelectMore(colExpr)
 		selectedFields[field.Name] = true
+		additionalSelectedFields = append(additionalSelectedFields, field.Name)
 	}

-	sb.From(selectFromCTE)
-
-	// Add order by support using ColumnExpressionFor
-	orderApplied := false
+	// Also expose any explicit ORDER BY fields that aren't already selected,
+	// so the outer query can reference them by alias name.
 	for _, orderBy := range b.operator.Order {
+		if selectedFields[orderBy.Key.Name] {
+			continue
+		}
 		colExpr, err := b.stmtBuilder.fm.ColumnExpressionFor(ctx, b.start, b.end, &orderBy.Key.TelemetryFieldKey, keys)
 		if err != nil {
 			return nil, err
 		}
-		sb.OrderBy(fmt.Sprintf("%s %s", colExpr, orderBy.Direction.StringValue()))
-		orderApplied = true
+		innerSB.SelectMore(colExpr)
+		selectedFields[orderBy.Key.Name] = true
 	}

-	if !orderApplied {
-		sb.OrderBy("timestamp DESC")
+	innerSB.From(selectFromCTE)
+	innerSQL, innerArgs := innerSB.BuildWithFlavor(sqlbuilder.ClickHouse)
+
+	// Outer SELECT reads from the inner subquery and re-exposes timestamp via
+	// the ts alias. ORDER BY uses the alias name directly — no AS-alias in the
+	// ORDER BY position — which is the pattern that avoids the CH regression.
+	outerSB := sqlbuilder.NewSelectBuilder()
+	outerSB.Select("ts AS timestamp")
+	outerSB.SelectMore(coreFields...)
+	for _, name := range additionalSelectedFields {
+		outerSB.SelectMore(fmt.Sprintf("`%s`", name))
+	}
+	outerSB.From(fmt.Sprintf("(%s) AS t", innerSQL))
+
+	if len(b.operator.Order) > 0 {
+		for _, orderBy := range b.operator.Order {
+			outerSB.OrderBy(fmt.Sprintf("`%s` %s", orderBy.Key.Name, orderBy.Direction.StringValue()))
+		}
+	} else {
+		outerSB.OrderBy("timestamp DESC")
 	}

 	if b.operator.Limit > 0 {
-		sb.Limit(b.operator.Limit)
+		outerSB.Limit(b.operator.Limit)
 	} else {
-		sb.Limit(100)
+		outerSB.Limit(100)
 	}

 	if b.operator.Offset > 0 {
-		sb.Offset(b.operator.Offset)
+		outerSB.Offset(b.operator.Offset)
 	}

-	sql, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
+	outerSQL, outerArgs := outerSB.BuildWithFlavor(sqlbuilder.ClickHouse)
 	return &qbtypes.Statement{
-		Query: sql,
-		Args:  args,
+		Query: outerSQL,
+		Args:  append(innerArgs, outerArgs...),
 	}, nil
 }
```
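To make the nesting concrete, here is a stripped-down, self-contained sketch of the two-builder pattern, assuming the builder API above is github.com/huandu/go-sqlbuilder (its method names match the calls in the diff); the CTE name and column list are placeholders.

```go
package main

import (
	"fmt"

	"github.com/huandu/go-sqlbuilder"
)

func main() {
	// Inner query: rename timestamp to ts so no `AS` alias feeds an ORDER BY
	// anywhere in the final statement.
	inner := sqlbuilder.NewSelectBuilder()
	inner.Select("timestamp AS ts")
	inner.SelectMore("trace_id", "span_id")
	inner.From("some_cte") // placeholder CTE name

	innerSQL, innerArgs := inner.BuildWithFlavor(sqlbuilder.ClickHouse)

	// Outer query: restore the timestamp name from the bare alias and order
	// by it directly.
	outer := sqlbuilder.NewSelectBuilder()
	outer.Select("ts AS timestamp")
	outer.SelectMore("trace_id", "span_id")
	outer.From(fmt.Sprintf("(%s) AS t", innerSQL))
	outer.OrderBy("timestamp DESC")
	outer.Limit(100)

	outerSQL, outerArgs := outer.BuildWithFlavor(sqlbuilder.ClickHouse)
	fmt.Println(outerSQL)
	// SELECT ts AS timestamp, trace_id, span_id FROM (SELECT timestamp AS ts,
	// trace_id, span_id FROM some_cte) AS t ORDER BY timestamp DESC LIMIT ?
	fmt.Println(append(innerArgs, outerArgs...)) // [100]: the limit placeholder
}
```

The updated test expectations below show exactly this shape embedded in the full CTE pipeline.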
@@ -67,7 +67,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
|
||||
},
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name` FROM A_DIR_DESC_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
|
||||
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id, `service.name` FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name` FROM A_DIR_DESC_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
|
||||
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
|
||||
},
|
||||
expectedErr: nil,
|
||||
@@ -104,7 +104,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
|
||||
},
|
||||
},
|
||||
expected: qbtypes.Statement{
|
||||
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_INDIR_DESC_B AS (WITH RECURSIVE up AS (SELECT d.trace_id, d.span_id, d.parent_span_id, 0 AS depth FROM B AS d UNION ALL SELECT p.trace_id, p.span_id, p.parent_span_id, up.depth + 1 FROM all_spans AS p JOIN up ON p.trace_id = up.trace_id AND p.span_id = up.parent_span_id WHERE up.depth < 100) SELECT DISTINCT a.* FROM A AS a GLOBAL INNER JOIN (SELECT DISTINCT trace_id, span_id FROM up WHERE depth > 0 ) AS ancestors ON ancestors.trace_id = a.trace_id AND ancestors.span_id = a.span_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_INDIR_DESC_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
|
||||
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_INDIR_DESC_B AS (WITH RECURSIVE up AS (SELECT d.trace_id, d.span_id, d.parent_span_id, 0 AS depth FROM B AS d UNION ALL SELECT p.trace_id, p.span_id, p.parent_span_id, up.depth + 1 FROM all_spans AS p JOIN up ON p.trace_id = up.trace_id AND p.span_id = up.parent_span_id WHERE up.depth < 100) SELECT DISTINCT a.* FROM A AS a GLOBAL INNER JOIN (SELECT DISTINCT trace_id, span_id FROM up WHERE depth > 0 ) AS ancestors ON ancestors.trace_id = a.trace_id AND ancestors.span_id = a.span_id) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id FROM A_INDIR_DESC_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "gateway", "%service.name%", "%service.name\":\"gateway%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "database", "%service.name%", "%service.name\":\"database%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 5},
},
expectedErr: nil,
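The expectation above is the interesting one: "A is an indirect descendant of B" is answered with a depth-bounded recursive CTE that walks parent pointers upward from every B span, then keeps only the A spans that appear strictly above a B span. A minimal sketch of that shape, with `spans`, `a`, and `b` standing in for the generated CTEs (not the exact emitted SQL):

// Sketch only: the ancestor walk behind A_INDIR_DESC_B. depth < 100 caps the
// recursion so malformed or cyclic parent pointers still terminate; depth > 0
// drops the B spans themselves, leaving proper ancestors.
const indirectDescendantShape = `
WITH RECURSIVE up AS (
    SELECT trace_id, span_id, parent_span_id, 0 AS depth FROM b
    UNION ALL
    SELECT p.trace_id, p.span_id, p.parent_span_id, up.depth + 1
    FROM spans AS p
    JOIN up ON p.trace_id = up.trace_id AND p.span_id = up.parent_span_id
    WHERE up.depth < 100
)
SELECT DISTINCT a.*
FROM a
JOIN (SELECT DISTINCT trace_id, span_id FROM up WHERE depth > 0) AS anc
    ON anc.trace_id = a.trace_id AND anc.span_id = a.span_id`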
@@ -141,7 +141,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_AND_B AS (SELECT l.* FROM A AS l INNER JOIN B AS r ON l.trace_id = r.trace_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_AND_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_AND_B AS (SELECT l.* FROM A AS l INNER JOIN B AS r ON l.trace_id = r.trace_id) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id FROM A_AND_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 15},
},
expectedErr: nil,
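The hunks in this file all make the same mechanical change, and it repeats for every operator below: the final SELECT no longer projects straight out of the result CTE but out of an inner subquery that aliases timestamp to ts and back. The integration test near the end of this compare ties the change to a ClickHouse 25.12.5 NOT_FOUND_COLUMN_IN_BLOCK regression. Distilled to the moving parts (a sketch; `result` stands for A_AND_B, A_OR_B, and the rest):

// Old tail: ORDER BY resolves against the CTE's columns directly.
const oldTail = `SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id
FROM result ORDER BY timestamp DESC LIMIT ?`

// New tail: the inner subquery materializes the projection first, so the
// outer ORDER BY only ever references columns that subquery produced.
const newTail = `SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id
FROM (
    SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id
    FROM result
) AS t ORDER BY timestamp DESC LIMIT ?`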
@@ -178,7 +178,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_OR_B AS (SELECT * FROM A UNION DISTINCT SELECT * FROM B) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_OR_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_OR_B AS (SELECT * FROM A UNION DISTINCT SELECT * FROM B) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id FROM A_OR_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 20},
},
expectedErr: nil,
@@ -215,7 +215,7 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_not_B AS (SELECT l.* FROM A AS l WHERE l.trace_id GLOBAL NOT IN (SELECT DISTINCT trace_id FROM B)) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_not_B ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_not_B AS (SELECT l.* FROM A AS l WHERE l.trace_id GLOBAL NOT IN (SELECT DISTINCT trace_id FROM B)) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id FROM A_not_B) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
},
expectedErr: nil,
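Taken together, the three hunks above pin down how each boolean trace operator is lowered onto the per-query CTEs. Stripped of the time-window plumbing, the shapes are:

// AND keeps A's spans for traces that also matched B (trace-level intersection).
const andShape = `A_AND_B AS (SELECT l.* FROM A AS l INNER JOIN B AS r ON l.trace_id = r.trace_id)`

// OR is the de-duplicated union of both span sets.
const orShape = `A_OR_B AS (SELECT * FROM A UNION DISTINCT SELECT * FROM B)`

// NOT keeps A's spans whose trace never matched B; GLOBAL makes the subquery
// usable against the distributed table.
const notShape = `A_not_B AS (SELECT l.* FROM A AS l WHERE l.trace_id GLOBAL NOT IN (SELECT DISTINCT trace_id FROM B))`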
@@ -380,11 +380,72 @@ func TestTraceOperatorStatementBuilder(t *testing.T) {
},
},
expected: qbtypes.Statement{
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), __resource_filter_C AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), C AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_C) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_D AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), D AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_D) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), C_DIR_DESC_D AS (SELECT p.* FROM C AS p INNER JOIN D AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), A_DIR_DESC_B_AND_C_DIR_DESC_D AS (SELECT l.* FROM A_DIR_DESC_B AS l INNER JOIN C_DIR_DESC_D AS r ON l.trace_id = r.trace_id) SELECT timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM A_DIR_DESC_B_AND_C_DIR_DESC_D ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), __resource_filter_C AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), C AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_C) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_D AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), D AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_D) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), C_DIR_DESC_D AS (SELECT p.* FROM C AS p INNER JOIN D AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id), A_DIR_DESC_B_AND_C_DIR_DESC_D AS (SELECT l.* FROM A_DIR_DESC_B AS l INNER JOIN C_DIR_DESC_D AS r ON l.trace_id = r.trace_id) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id FROM A_DIR_DESC_B_AND_C_DIR_DESC_D) AS t ORDER BY timestamp DESC LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "auth", "%service.name%", "%service.name\":\"auth%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "database", "%service.name%", "%service.name\":\"database%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 5},
},
expectedErr: nil,
},
{
// order-by field (http.request.method) is not present in SelectFields;
// it must be included in the inner SELECT so the outer ORDER BY can
// reference it by alias, but must NOT appear in the outer SELECT list.
name: "order by field not in select fields",
requestType: qbtypes.RequestTypeRaw,
operator: qbtypes.QueryBuilderTraceOperator{
Expression: "A => B",
SelectFields: []telemetrytypes.TelemetryFieldKey{
{
Name: "service.name",
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
Order: []qbtypes.OrderBy{
{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "http.request.method",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
Limit: 10,
},
compositeQuery: &qbtypes.CompositeQuery{
Queries: []qbtypes.QueryEnvelope{
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
Name: "A",
Signal: telemetrytypes.SignalTraces,
Filter: &qbtypes.Filter{
Expression: "service.name = 'frontend'",
},
},
},
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
Name: "B",
Signal: telemetrytypes.SignalTraces,
Filter: &qbtypes.Filter{
Expression: "service.name = 'backend'",
},
},
},
},
},
expected: qbtypes.Statement{
// http.request.method is in the inner SELECT (so ORDER BY can reach it)
// but is absent from the outer SELECT column list — only the ORDER BY clause references it.
Query: "WITH toDateTime64(1747947419000000000, 9) AS t_from, toDateTime64(1747983448000000000, 9) AS t_to, 1747945619 AS bucket_from, 1747983448 AS bucket_to, all_spans AS (SELECT *, resource_string_service$$name AS `service.name` FROM signoz_traces.distributed_signoz_index_v3 WHERE timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_A AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), A AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_A) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), __resource_filter_B AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (simpleJSONExtractString(labels, 'service.name') = ? AND labels LIKE ? AND labels LIKE ?) AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?), B AS (SELECT * FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter_B) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ?), A_DIR_DESC_B AS (SELECT p.* FROM A AS p INNER JOIN B AS c ON p.trace_id = c.trace_id AND p.span_id = c.parent_span_id) SELECT ts AS timestamp, trace_id, span_id, name, duration_nano, parent_span_id, `service.name` FROM (SELECT timestamp AS ts, trace_id, span_id, name, duration_nano, parent_span_id, multiIf(resource.`service.name` IS NOT NULL, resource.`service.name`::String, mapContains(resources_string, 'service.name'), resources_string['service.name'], NULL) AS `service.name`, attributes_string['http.request.method'] AS `http.request.method` FROM A_DIR_DESC_B) AS t ORDER BY `http.request.method` desc LIMIT ? SETTINGS distributed_product_mode='allow', max_memory_usage=10000000000",
Args: []any{"1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "frontend", "%service.name%", "%service.name\":\"frontend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), "backend", "%service.name%", "%service.name\":\"backend%", uint64(1747945619), uint64(1747983448), "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
},
expectedErr: nil,
},
}
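The expected query in the new case above is easier to read once the column routing is isolated: the inner SELECT materializes both the requested select field and the order-by attribute, and the outer SELECT repeats only the former. A simplified sketch of that routing (identifier quoting and the multiIf resource lookup from the real query are elided):

// Sketch only: where each column lands in the order-by-not-selected case.
const orderByNotSelectedShape = `
SELECT ts AS timestamp, trace_id, span_id, name, service_name   -- no order-by attr here
FROM (
    SELECT timestamp AS ts, trace_id, span_id, name,
           resources_string['service.name'] AS service_name,
           attributes_string['http.request.method'] AS http_request_method
    FROM A_DIR_DESC_B
) AS t
ORDER BY http_request_method DESC
LIMIT ?`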
fm := NewFieldMapper()
@@ -1,83 +0,0 @@
package inframonitoringtypes

import (
	"slices"

	"github.com/SigNoz/signoz/pkg/errors"
)

// PostableOnboarding is the request for GET /api/v2/infra_monitoring/onboarding.
// The single `type` query param selects which infra-monitoring subsection the
// readiness check runs for.
type PostableOnboarding struct {
	Type OnboardingType `query:"type" required:"true"`
}

// Validate rejects empty/unknown onboarding types.
func (req *PostableOnboarding) Validate() error {
	if req == nil {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
	}

	if req.Type.IsZero() {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "type is required")
	}

	if !slices.Contains(ValidOnboardingTypes, req.Type) {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid type: %s", req.Type)
	}

	return nil
}

// Onboarding is the response for GET /api/v2/infra_monitoring/onboarding.
//
// The three present/missing pairs partition a type's requirements into three
// dimensions — default-enabled metrics, optional metrics, required attributes —
// each bucketed by the collector component (receiver or processor) that
// produces it. Ready is true iff every Missing* array is empty.
type Onboarding struct {
	Type                         OnboardingType                    `json:"type" required:"true"`
	Ready                        bool                              `json:"ready" required:"true"`
	PresentDefaultEnabledMetrics []MetricsComponentEntry           `json:"presentDefaultEnabledMetrics" required:"true"`
	PresentOptionalMetrics       []MetricsComponentEntry           `json:"presentOptionalMetrics" required:"true"`
	PresentRequiredAttributes    []AttributesComponentEntry        `json:"presentRequiredAttributes" required:"true"`
	MissingDefaultEnabledMetrics []MissingMetricsComponentEntry    `json:"missingDefaultEnabledMetrics" required:"true"`
	MissingOptionalMetrics       []MissingMetricsComponentEntry    `json:"missingOptionalMetrics" required:"true"`
	MissingRequiredAttributes    []MissingAttributesComponentEntry `json:"missingRequiredAttributes" required:"true"`
}
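The last sentence of that doc comment is an invariant rather than a free field: Ready carries no information of its own and is derivable from the three Missing* slices. A hypothetical helper, not part of the deleted file, that states it directly:

// computeReady restates the documented contract: ready iff every Missing*
// slice is empty, optional metrics included.
func computeReady(o Onboarding) bool {
	return len(o.MissingDefaultEnabledMetrics) == 0 &&
		len(o.MissingOptionalMetrics) == 0 &&
		len(o.MissingRequiredAttributes) == 0
}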

// AssociatedComponent identifies the collector receiver or processor that a
// metric or attribute originates from. Name is free-form (e.g. "kubeletstatsreceiver").
type AssociatedComponent struct {
	Type OnboardingComponentType `json:"type" required:"true"`
	Name string                  `json:"name" required:"true"`
}

// MetricsComponentEntry lists metrics that share a single associated component.
type MetricsComponentEntry struct {
	Metrics             []string            `json:"metrics" required:"true"`
	AssociatedComponent AssociatedComponent `json:"associatedComponent" required:"true"`
}

// AttributesComponentEntry lists resource attributes that share a single associated component.
type AttributesComponentEntry struct {
	Attributes          []string            `json:"attributes" required:"true"`
	AssociatedComponent AssociatedComponent `json:"associatedComponent" required:"true"`
}

// MissingMetricsComponentEntry extends MetricsComponentEntry with a user-facing
// message and a docs link for fixing the missing metrics.
type MissingMetricsComponentEntry struct {
	MetricsComponentEntry
	Message           string `json:"message" required:"true"`
	DocumentationLink string `json:"documentationLink" required:"true"`
}

// MissingAttributesComponentEntry extends AttributesComponentEntry with a user-facing
// message and a docs link for fixing the missing attributes.
type MissingAttributesComponentEntry struct {
	AttributesComponentEntry
	Message           string `json:"message" required:"true"`
	DocumentationLink string `json:"documentationLink" required:"true"`
}
@@ -1,71 +0,0 @@
package inframonitoringtypes

import "github.com/SigNoz/signoz/pkg/valuer"

// OnboardingType identifies a single infra-monitoring subsection (UI tab).
// One value per v1/v2 list API we surface in the infra-monitoring section.
type OnboardingType struct {
	valuer.String
}

var (
	OnboardingTypeHosts        = OnboardingType{valuer.NewString("hosts")}
	OnboardingTypeProcesses    = OnboardingType{valuer.NewString("processes")}
	OnboardingTypePods         = OnboardingType{valuer.NewString("pods")}
	OnboardingTypeNodes        = OnboardingType{valuer.NewString("nodes")}
	OnboardingTypeDeployments  = OnboardingType{valuer.NewString("deployments")}
	OnboardingTypeDaemonsets   = OnboardingType{valuer.NewString("daemonsets")}
	OnboardingTypeStatefulsets = OnboardingType{valuer.NewString("statefulsets")}
	OnboardingTypeJobs         = OnboardingType{valuer.NewString("jobs")}
	OnboardingTypeNamespaces   = OnboardingType{valuer.NewString("namespaces")}
	OnboardingTypeClusters     = OnboardingType{valuer.NewString("clusters")}
	OnboardingTypeVolumes      = OnboardingType{valuer.NewString("volumes")}
)

func (OnboardingType) Enum() []any {
	return []any{
		OnboardingTypeHosts,
		OnboardingTypeProcesses,
		OnboardingTypePods,
		OnboardingTypeNodes,
		OnboardingTypeDeployments,
		OnboardingTypeDaemonsets,
		OnboardingTypeStatefulsets,
		OnboardingTypeJobs,
		OnboardingTypeNamespaces,
		OnboardingTypeClusters,
		OnboardingTypeVolumes,
	}
}

var ValidOnboardingTypes = []OnboardingType{
	OnboardingTypeHosts,
	OnboardingTypeProcesses,
	OnboardingTypePods,
	OnboardingTypeNodes,
	OnboardingTypeDeployments,
	OnboardingTypeDaemonsets,
	OnboardingTypeStatefulsets,
	OnboardingTypeJobs,
	OnboardingTypeNamespaces,
	OnboardingTypeClusters,
	OnboardingTypeVolumes,
}

// OnboardingComponentType tags each AssociatedComponent as either a receiver or a processor.
// Only these two values are ever written by the module.
type OnboardingComponentType struct {
	valuer.String
}

var (
	OnboardingComponentTypeReceiver  = OnboardingComponentType{valuer.NewString("receiver")}
	OnboardingComponentTypeProcessor = OnboardingComponentType{valuer.NewString("processor")}
)

func (OnboardingComponentType) Enum() []any {
	return []any{
		OnboardingComponentTypeReceiver,
		OnboardingComponentTypeProcessor,
	}
}
@@ -1,110 +0,0 @@
package inframonitoringtypes

import (
	"testing"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/stretchr/testify/require"
)

func TestPostableOnboarding_Validate(t *testing.T) {
	tests := []struct {
		name    string
		req     *PostableOnboarding
		wantErr bool
	}{
		{
			name:    "nil request",
			req:     nil,
			wantErr: true,
		},
		{
			name:    "empty type",
			req:     &PostableOnboarding{},
			wantErr: true,
		},
		{
			name:    "unknown type",
			req:     &PostableOnboarding{Type: OnboardingType{valuer.NewString("foo")}},
			wantErr: true,
		},
		{
			name:    "hosts",
			req:     &PostableOnboarding{Type: OnboardingTypeHosts},
			wantErr: false,
		},
		{
			name:    "processes",
			req:     &PostableOnboarding{Type: OnboardingTypeProcesses},
			wantErr: false,
		},
		{
			name:    "pods",
			req:     &PostableOnboarding{Type: OnboardingTypePods},
			wantErr: false,
		},
		{
			name:    "nodes",
			req:     &PostableOnboarding{Type: OnboardingTypeNodes},
			wantErr: false,
		},
		{
			name:    "deployments",
			req:     &PostableOnboarding{Type: OnboardingTypeDeployments},
			wantErr: false,
		},
		{
			name:    "daemonsets",
			req:     &PostableOnboarding{Type: OnboardingTypeDaemonsets},
			wantErr: false,
		},
		{
			name:    "statefulsets",
			req:     &PostableOnboarding{Type: OnboardingTypeStatefulsets},
			wantErr: false,
		},
		{
			name:    "jobs",
			req:     &PostableOnboarding{Type: OnboardingTypeJobs},
			wantErr: false,
		},
		{
			name:    "namespaces",
			req:     &PostableOnboarding{Type: OnboardingTypeNamespaces},
			wantErr: false,
		},
		{
			name:    "clusters",
			req:     &PostableOnboarding{Type: OnboardingTypeClusters},
			wantErr: false,
		},
		{
			name:    "volumes",
			req:     &PostableOnboarding{Type: OnboardingTypeVolumes},
			wantErr: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.req.Validate()
			if tt.wantErr {
				require.Error(t, err)
				require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
			} else {
				require.NoError(t, err)
			}
		})
	}
}

// TestValidOnboardingTypes_MatchesEnum ensures the ValidOnboardingTypes slice
// stays in sync with the Enum() list — both must cover every OnboardingType value.
func TestValidOnboardingTypes_MatchesEnum(t *testing.T) {
	enum := OnboardingType{}.Enum()
	require.Equal(t, len(enum), len(ValidOnboardingTypes))
	for i, v := range enum {
		require.Equal(t, v, ValidOnboardingTypes[i])
	}
}
tests/fixtures/querier.py
@@ -72,6 +72,7 @@ class TraceOperatorQuery:
    return_spans_from: str
    limit: int | None = None
    order: list[OrderBy] | None = None
    select_fields: list[TelemetryFieldKey] | None = None

    def to_dict(self) -> dict:
        spec: dict[str, Any] = {
@@ -83,6 +84,8 @@ class TraceOperatorQuery:
            spec["limit"] = self.limit
        if self.order:
            spec["order"] = [o.to_dict() if hasattr(o, "to_dict") else o for o in self.order]
        if self.select_fields:
            spec["selectFields"] = [f.to_dict() for f in self.select_fields]
        return {"type": "builder_trace_operator", "spec": spec}
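The serializer above only attaches the new keys when they are set, so envelopes from callers that never pass order or selectFields come out unchanged. The same omit-when-unset pattern in Go terms, with illustrative names rather than repo code:

// buildSpec mirrors TraceOperatorQuery.to_dict: optional keys are added to the
// spec map only when present, never serialized as null.
func buildSpec(limit int, order, selectFields []any) map[string]any {
	spec := map[string]any{}
	if limit > 0 {
		spec["limit"] = limit
	}
	if len(order) > 0 {
		spec["order"] = order
	}
	if len(selectFields) > 0 {
		spec["selectFields"] = selectFields
	}
	return map[string]any{"type": "builder_trace_operator", "spec": spec}
}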
@@ -530,11 +530,13 @@ def test_export_traces_with_composite_query_trace_operator(
) -> None:
    """
    Setup:
        Insert multiple traces with parent-child relationships.
        Insert a parent span and two child spans, all with an http.method attribute.

    Tests:
        1. Export traces using trace operator in composite query (POST)
        2. Verify trace operator query works correctly
        1. Basic trace operator (A => B) returning parent spans, ordered by timestamp.
        2. Same operator with selectFields=[service.name] and order by http.method, which is
           NOT in selectFields — verifies the inner/outer subquery fix for the CH 25.12.5
           NOT_FOUND_COLUMN_IN_BLOCK regression (ORDER BY col AS `col` in a CTE shape).
    """
    parent_trace_id = TraceIdGenerator.trace_id()
    parent_span_id = TraceIdGenerator.span_id()
@@ -555,12 +557,8 @@ def test_export_traces_with_composite_query_trace_operator(
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "parent-service",
                },
                attributes={
                    "operation.type": "parent",
                },
                resources={"service.name": "parent-service"},
                attributes={"operation.type": "parent", "http.method": "GET"},
            ),
            Traces(
                timestamp=now - timedelta(seconds=9),
@@ -572,12 +570,8 @@ def test_export_traces_with_composite_query_trace_operator(
                kind=TracesKind.SPAN_KIND_INTERNAL,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "parent-service",
                },
                attributes={
                    "operation.type": "child",
                },
                resources={"service.name": "parent-service"},
                attributes={"operation.type": "child", "http.method": "POST"},
            ),
            Traces(
                timestamp=now - timedelta(seconds=7),
@@ -589,31 +583,23 @@ def test_export_traces_with_composite_query_trace_operator(
                kind=TracesKind.SPAN_KIND_INTERNAL,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "parent-service",
                },
                attributes={
                    "operation.type": "child",
                },
                resources={"service.name": "parent-service"},
                attributes={"operation.type": "child", "http.method": "POST"},
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    # A: spans with operation.type = 'parent'
    url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
    query_a = BuilderQuery(
        signal="traces",
        name="A",
        limit=1000,
        filter_expression="operation.type = 'parent'",
    )

    # B: spans with operation.type = 'child'
    query_b = BuilderQuery(
        signal="traces",
        name="B",
@@ -621,47 +607,50 @@ def test_export_traces_with_composite_query_trace_operator(
        filter_expression="operation.type = 'child'",
    )

    # Trace operator: find traces where A has a direct descendant B
    query_c = TraceOperatorQuery(
        name="C",
        expression="A => B",
        return_spans_from="A",
        limit=1000,
        order=[OrderBy(TelemetryFieldKey("timestamp", "string", "span"), "desc")],
    def export(operator: TraceOperatorQuery) -> list[dict]:
        body = QueryRangeRequest(
            start=start_ns,
            end=end_ns,
            queries=[query_a, query_b, operator],
        ).to_dict()
        resp = requests.post(
            url,
            json=body,
            timeout=10,
            headers={"authorization": f"Bearer {token}", "Content-Type": "application/json"},
        )
        assert resp.status_code == HTTPStatus.OK, resp.text
        assert resp.headers["Content-Type"] == "application/x-ndjson"
        return [json.loads(line) for line in resp.text.strip().split("\n") if line]

    # Test 1: basic trace operator ordered by timestamp
    spans = export(
        TraceOperatorQuery(
            name="C",
            expression="A => B",
            return_spans_from="A",
            limit=1000,
            order=[OrderBy(TelemetryFieldKey("timestamp", "string", "span"), "desc")],
        )
    )
    assert len(spans) == 1
    assert all(s.get("trace_id") == parent_trace_id for s in spans)
    assert any(s.get("name") == "parent-operation" for s in spans)

    body = QueryRangeRequest(
        start=start_ns,
        end=end_ns,
        queries=[query_a, query_b, query_c],
    ).to_dict()

    url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
    response = requests.post(
        url,
        json=body,
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        },
    # Test 2: order-by field (http.method) absent from selectFields
    spans = export(
        TraceOperatorQuery(
            name="C",
            expression="A => B",
            return_spans_from="A",
            limit=1000,
            select_fields=[TelemetryFieldKey("service.name", "string", "resource")],
            order=[OrderBy(TelemetryFieldKey("http.method", "string", "tag"), "desc")],
        )
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 1, f"Expected at least 1 line, got {len(jsonl_lines)}"

    # Verify all returned spans belong to the matched trace
    json_objects = [json.loads(line) for line in jsonl_lines]
    trace_ids = [obj.get("trace_id") for obj in json_objects]
    assert all(tid == parent_trace_id for tid in trace_ids)

    # Verify the parent span (returnSpansFrom = "A") is present
    span_names = [obj.get("name") for obj in json_objects]
    assert "parent-operation" in span_names
    assert len(spans) >= 1
    assert all(s.get("trace_id") == parent_trace_id for s in spans)
    assert any(s.get("name") == "parent-operation" for s in spans)


def test_export_traces_with_select_fields(