Compare commits

...

23 Commits

Author SHA1 Message Date
Nikhil Mantri
c36f747b1d Merge branch 'main' into feat/improve_infra_monitoring_list_apis 2026-02-28 17:33:47 +05:30
nikhilmantri0902
14f733ddd8 chore: invalid input error returning 2026-02-28 17:32:21 +05:30
Nikhil Mantri
2337bc5ad9 Merge branch 'main' into feat/improve_infra_monitoring_list_apis 2026-02-26 20:11:52 +05:30
nikhilmantri0902
e3fdf8c463 Merge branch 'feat/update_hosts_list_error_messaging' into feat/improve_infra_monitoring_list_apis 2026-02-26 13:17:46 +05:30
nikhilmantri0902
a518b1688b chore: rename func 2026-02-26 13:17:05 +05:30
Nikhil Mantri
866d005cc6 Merge branch 'main' into feat/improve_infra_monitoring_list_apis 2026-02-26 12:41:20 +05:30
nikhilmantri0902
7e7e3579b8 chore: updated metadata fetching logic in all list apis 2026-02-26 12:32:17 +05:30
Nikhil Mantri
44a94e7746 Merge branch 'main' into feat/update_hosts_list_error_messaging 2026-02-26 11:04:38 +05:30
Nikhil Mantri
38d334b9aa Merge branch 'main' into feat/update_hosts_list_error_messaging 2026-02-24 20:36:14 +05:30
Nikhil Mantri
3f27e49eac Merge branch 'main' into feat/update_hosts_list_error_messaging 2026-02-20 12:57:03 +05:30
nikhilmantri0902
0b8e87ec96 chore: added test case and final comment resolve 2026-02-19 16:51:58 +05:30
nikhilmantri0902
df2916bf7f chore: review comments 1 2026-02-19 16:15:30 +05:30
Nikhil Mantri
0c62c075f9 Merge branch 'main' into feat/update_hosts_list_error_messaging 2026-02-19 15:54:14 +05:30
nikhilmantri0902
f42d95d5f5 chore: title updated for no host metrics found 2026-02-18 12:32:32 +05:30
nikhilmantri0902
32c0dfa28f chore: title for end time before earliest metadata time 2026-02-18 12:25:09 +05:30
Nikhil Mantri
68e831c4b0 Merge branch 'main' into feat/update_hosts_list_error_messaging 2026-02-17 15:51:27 +05:30
Nikhil Mantri
ed79d40492 Merge branch 'main' into feat/update_hosts_list_error_messaging 2026-02-17 14:16:13 +05:30
nikhilmantri0902
ad8223c792 chore: refactor and conditions combine 2026-02-17 13:46:55 +05:30
nikhilmantri0902
5d691476b1 chore: rearrangement 2026-02-17 12:46:38 +05:30
nikhilmantri0902
358977e203 chore: frontend messaging test fix 2026-02-16 14:37:03 +05:30
nikhilmantri0902
7ffb3e30b5 chore: improved messaging and comment 2026-02-16 14:19:12 +05:30
nikhilmantri0902
ded7b78360 chore: use named query 2026-02-16 13:43:43 +05:30
nikhilmantri0902
76e9074ca7 chore: initial logical changes 2026-02-15 19:04:44 +05:30
12 changed files with 229 additions and 28 deletions

View File

@@ -97,7 +97,14 @@ func (p *ClustersRepo) getMetadataAttributes(ctx context.Context, req model.Clus
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
otherClusterMetricsForMetadata := make([]string, 0)
for _, metric := range metricNamesForNodes {
if metric != metricToUseForClusters {
otherClusterMetricsForMetadata = append(otherClusterMetricsForMetadata, metric)
}
}
query, err := helpers.PrepareTimeseriesFilterQueryWithMultipleMetrics(req.Start, req.End, &mq, otherClusterMetricsForMetadata)
if err != nil {
return nil, err
}

View File

@@ -164,7 +164,19 @@ func (d *DaemonSetsRepo) getMetadataAttributes(ctx context.Context, req model.Da
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
otherDaemonSetMetricsForMetadata := make([]string, 0)
for _, metric := range metricNamesForWorkloads {
if metric != metricToUseForDaemonSets {
otherDaemonSetMetricsForMetadata = append(otherDaemonSetMetricsForMetadata, metric)
}
}
for _, metric := range metricNamesForDaemonSets {
if metric != metricToUseForDaemonSets {
otherDaemonSetMetricsForMetadata = append(otherDaemonSetMetricsForMetadata, metric)
}
}
query, err := helpers.PrepareTimeseriesFilterQueryWithMultipleMetrics(req.Start, req.End, &mq, otherDaemonSetMetricsForMetadata)
if err != nil {
return nil, err
}

View File

@@ -164,7 +164,19 @@ func (d *DeploymentsRepo) getMetadataAttributes(ctx context.Context, req model.D
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
otherDeploymentMetricsForMetadata := make([]string, 0)
for _, metric := range metricNamesForWorkloads {
if metric != metricToUseForDeployments {
otherDeploymentMetricsForMetadata = append(otherDeploymentMetricsForMetadata, metric)
}
}
for _, metric := range metricNamesForDeployments {
if metric != metricToUseForDeployments {
otherDeploymentMetricsForMetadata = append(otherDeploymentMetricsForMetadata, metric)
}
}
query, err := helpers.PrepareTimeseriesFilterQueryWithMultipleMetrics(req.Start, req.End, &mq, otherDeploymentMetricsForMetadata)
if err != nil {
return nil, err
}

View File

@@ -61,11 +61,13 @@ var (
// TODO(srikanthccv): remove hardcoded metric name and support keys from any system metric
metricToUseForHostAttributes = GetDotMetrics("system_cpu_load_average_15m")
hostNameAttrKey = GetDotMetrics("host_name")
agentNameToIgnore = "k8s-infra-otel-agent"
hostAttrsToEnrich = []string{
hostNameAttrKey = GetDotMetrics("host_name")
agentNameToIgnore = "k8s-infra-otel-agent"
hostAttrsToEnrich = []string{
GetDotMetrics("os_type"),
}
metricNamesForHosts = map[string]string{
"filesystem": GetDotMetrics("system_filesystem_usage"),
"cpu": GetDotMetrics("system_cpu_time"),
@@ -215,7 +217,14 @@ func (h *HostsRepo) getMetadataAttributes(ctx context.Context, req model.HostLis
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
otherHostMetricsForMetadata := make([]string, 0)
for _, metric := range metricNamesForHosts {
if metric != metricToUseForHostAttributes {
otherHostMetricsForMetadata = append(otherHostMetricsForMetadata, metric)
}
}
query, err := helpers.PrepareTimeseriesFilterQueryWithMultipleMetrics(req.Start, req.End, &mq, otherHostMetricsForMetadata)
if err != nil {
return nil, err
}

View File

@@ -208,7 +208,19 @@ func (d *JobsRepo) getMetadataAttributes(ctx context.Context, req model.JobListR
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
otherJobMetricsForMetadata := make([]string, 0)
for _, metric := range metricNamesForWorkloads {
if metric != metricToUseForJobs {
otherJobMetricsForMetadata = append(otherJobMetricsForMetadata, metric)
}
}
for _, metric := range metricNamesForJobs {
if metric != metricToUseForJobs {
otherJobMetricsForMetadata = append(otherJobMetricsForMetadata, metric)
}
}
query, err := helpers.PrepareTimeseriesFilterQueryWithMultipleMetrics(req.Start, req.End, &mq, otherJobMetricsForMetadata)
if err != nil {
return nil, err
}

View File

@@ -91,7 +91,14 @@ func (p *NamespacesRepo) getMetadataAttributes(ctx context.Context, req model.Na
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
otherNamespaceMetricsForMetadata := make([]string, 0)
for _, metric := range metricNamesForPods {
if metric != metricToUseForNamespaces {
otherNamespaceMetricsForMetadata = append(otherNamespaceMetricsForMetadata, metric)
}
}
query, err := helpers.PrepareTimeseriesFilterQueryWithMultipleMetrics(req.Start, req.End, &mq, otherNamespaceMetricsForMetadata)
if err != nil {
return nil, err
}

View File

@@ -121,7 +121,14 @@ func (p *NodesRepo) getMetadataAttributes(ctx context.Context, req model.NodeLis
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
otherNodeMetricsForMetadata := make([]string, 0)
for _, metric := range metricNamesForNodes {
if metric != metricToUseForNodes {
otherNodeMetricsForMetadata = append(otherNodeMetricsForMetadata, metric)
}
}
query, err := helpers.PrepareTimeseriesFilterQueryWithMultipleMetrics(req.Start, req.End, &mq, otherNodeMetricsForMetadata)
if err != nil {
return nil, err
}

View File

@@ -21,6 +21,17 @@ import (
var (
metricToUseForPods = GetDotMetrics("k8s_pod_cpu_usage")
metricNamesForPods = map[string]string{
"cpu": GetDotMetrics("k8s_pod_cpu_usage"),
"cpu_request": GetDotMetrics("k8s_pod_cpu_request_utilization"),
"cpu_limit": GetDotMetrics("k8s_pod_cpu_limit_utilization"),
"memory": GetDotMetrics("k8s_pod_memory_working_set"),
"memory_request": GetDotMetrics("k8s_pod_memory_request_utilization"),
"memory_limit": GetDotMetrics("k8s_pod_memory_limit_utilization"),
"restarts": GetDotMetrics("k8s_container_restarts"),
"pod_phase": GetDotMetrics("k8s_pod_phase"),
}
podAttrsToEnrich = []string{
GetDotMetrics("k8s_pod_uid"),
GetDotMetrics("k8s_pod_name"),
@@ -47,17 +58,6 @@ var (
"pod_phase": {"H", "I", "J", "K"},
}
podQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"}
metricNamesForPods = map[string]string{
"cpu": GetDotMetrics("k8s_pod_cpu_usage"),
"cpu_request": GetDotMetrics("k8s_pod_cpu_request_utilization"),
"cpu_limit": GetDotMetrics("k8s_pod_cpu_limit_utilization"),
"memory": GetDotMetrics("k8s_pod_memory_working_set"),
"memory_request": GetDotMetrics("k8s_pod_memory_request_utilization"),
"memory_limit": GetDotMetrics("k8s_pod_memory_limit_utilization"),
"restarts": GetDotMetrics("k8s_container_restarts"),
"pod_phase": GetDotMetrics("k8s_pod_phase"),
}
)
type PodsRepo struct {
@@ -266,7 +266,14 @@ func (p *PodsRepo) getMetadataAttributes(ctx context.Context, req model.PodListR
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
otherPodMetricsForMetadata := make([]string, 0)
for _, metric := range metricNamesForPods {
if metric != metricToUseForPods {
otherPodMetricsForMetadata = append(otherPodMetricsForMetadata, metric)
}
}
query, err := helpers.PrepareTimeseriesFilterQueryWithMultipleMetrics(req.Start, req.End, &mq, otherPodMetricsForMetadata)
if err != nil {
return nil, err
}

View File

@@ -26,10 +26,12 @@ var (
"cpu": GetDotMetrics("process_cpu_time"),
"memory": GetDotMetrics("process_memory_usage"),
}
metricToUseForProcessAttributes = GetDotMetrics("process_memory_usage")
processNameAttrKey = GetDotMetrics("process_executable_name")
processCMDAttrKey = GetDotMetrics("process_command")
processCMDLineAttrKey = GetDotMetrics("process_command_line")
processNameAttrKey = GetDotMetrics("process_executable_name")
processCMDAttrKey = GetDotMetrics("process_command")
processCMDLineAttrKey = GetDotMetrics("process_command_line")
)
type ProcessesRepo struct {
@@ -108,7 +110,14 @@ func (p *ProcessesRepo) getMetadataAttributes(ctx context.Context,
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
otherMetricsForProcessMetadata := make([]string, 0)
for _, metric := range metricNamesForProcesses {
if metric != metricToUseForProcessAttributes {
otherMetricsForProcessMetadata = append(otherMetricsForProcessMetadata, metric)
}
}
query, err := helpers.PrepareTimeseriesFilterQueryWithMultipleMetrics(req.Start, req.End, &mq, otherMetricsForProcessMetadata)
if err != nil {
return nil, err
}

View File

@@ -124,7 +124,14 @@ func (p *PvcsRepo) getMetadataAttributes(ctx context.Context, req model.VolumeLi
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
otherVolumeMetricsForMetadata := make([]string, 0)
for _, metric := range metricNamesForVolumes {
if metric != metricToUseForVolumes {
otherVolumeMetricsForMetadata = append(otherVolumeMetricsForMetadata, metric)
}
}
query, err := helpers.PrepareTimeseriesFilterQueryWithMultipleMetrics(req.Start, req.End, &mq, otherVolumeMetricsForMetadata)
if err != nil {
return nil, err
}

View File

@@ -164,7 +164,19 @@ func (d *StatefulSetsRepo) getMetadataAttributes(ctx context.Context, req model.
GroupBy: req.GroupBy,
}
query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
otherStatefulSetMetricsForMetadata := make([]string, 0)
for _, metric := range metricNamesForWorkloads {
if metric != metricToUseForStatefulSets {
otherStatefulSetMetricsForMetadata = append(otherStatefulSetMetricsForMetadata, metric)
}
}
for _, metric := range metricNamesForStatefulSets {
if metric != metricToUseForStatefulSets {
otherStatefulSetMetricsForMetadata = append(otherStatefulSetMetricsForMetadata, metric)
}
}
query, err := helpers.PrepareTimeseriesFilterQueryWithMultipleMetrics(req.Start, req.End, &mq, otherStatefulSetMetricsForMetadata)
if err != nil {
return nil, err
}

View File

@@ -5,6 +5,7 @@ import (
"strings"
"time"
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/query-service/constants"
v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
"github.com/SigNoz/signoz/pkg/query-service/utils"
@@ -346,6 +347,105 @@ func PrepareTimeseriesFilterQuery(start, end int64, mq *v3.BuilderQuery) (string
return filterSubQuery, nil
}
// PrepareTimeseriesFilterQueryWithMultipleMetrics builds the sub-query used to
// filter timeseries rows matching any of the given metric names (plus the
// query's aggregate attribute), the requested time range, and the query's
// filter items. The result is a SELECT DISTINCT over the fingerprint column
// (prefixed by any group-by labels) suitable for use as a JOIN sub-query.
//
// Parameters:
//   - start, end: time range in unix milliseconds; may be adjusted by
//     whichTSTableToUse to match the chosen timeseries table's granularity.
//   - mq: builder query carrying the aggregate attribute, filters, and
//     group-by keys.
//   - metricNames: additional metric names to include in the metric_name IN
//     clause alongside mq.AggregateAttribute.Key. The slice is not mutated.
//
// Returns the sub-query string, or an error when a filter item uses an
// unsupported operator.
func PrepareTimeseriesFilterQueryWithMultipleMetrics(start, end int64, mq *v3.BuilderQuery, metricNames []string) (string, error) {
	var conditions []string
	var fs *v3.FilterSet = mq.Filters
	var groupTags []v3.AttributeKey = mq.GroupBy

	// Copy into a fresh slice before appending so the caller's backing array
	// is never mutated (callers commonly pass shared, pre-built slices, and
	// append with spare capacity would write into them).
	allMetricNames := make([]string, 0, len(metricNames)+1)
	allMetricNames = append(allMetricNames, metricNames...)
	if mq.AggregateAttribute.Key != "" {
		allMetricNames = append(allMetricNames, mq.AggregateAttribute.Key)
	}
	conditions = append(conditions, fmt.Sprintf("metric_name IN %s", utils.ClickHouseFormattedMetricNames(allMetricNames)))

	// Normalized and dot-separated metric names are stored as distinct rows;
	// select the variant matching the deployment-wide setting.
	if constants.IsDotMetricsEnabled {
		conditions = append(conditions, "__normalized = false")
	} else {
		conditions = append(conditions, "__normalized = true")
	}

	start, end, tableName := whichTSTableToUse(start, end, mq)
	conditions = append(conditions, fmt.Sprintf("unix_milli >= %d AND unix_milli < %d", start, end))

	if fs != nil && len(fs.Items) != 0 {
		for _, item := range fs.Items {
			// "__value" filters apply to sample values, not labels; they are
			// handled elsewhere, so skip them here.
			if item.Key.Key == "__value" {
				continue
			}
			toFormat := item.Value
			op := v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
			if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains {
				toFormat = fmt.Sprintf("%%%s%%", toFormat)
			}
			var fmtVal string
			// Exists/NotExists operate on the key only; there is no value to
			// format (and formatting a nil value would be meaningless).
			if op != v3.FilterOperatorExists && op != v3.FilterOperatorNotExists {
				fmtVal = utils.ClickHouseFormattedValue(toFormat)
			}
			switch op {
			case v3.FilterOperatorEqual:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') = %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotEqual:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') != %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorIn:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') IN %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotIn:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') NOT IN %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorLike:
				conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotLike:
				conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorRegex:
				conditions = append(conditions, fmt.Sprintf("match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotRegex:
				conditions = append(conditions, fmt.Sprintf("not match(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorGreaterThan:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') > %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorGreaterThanOrEq:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') >= %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorLessThan:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') < %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorLessThanOrEq:
				conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') <= %s", item.Key.Key, fmtVal))
			case v3.FilterOperatorContains:
				conditions = append(conditions, fmt.Sprintf("like(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotContains:
				conditions = append(conditions, fmt.Sprintf("notLike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorExists:
				conditions = append(conditions, fmt.Sprintf("has(JSONExtractKeys(labels), '%s')", item.Key.Key))
			case v3.FilterOperatorNotExists:
				conditions = append(conditions, fmt.Sprintf("not has(JSONExtractKeys(labels), '%s')", item.Key.Key))
			case v3.FilterOperatorILike:
				conditions = append(conditions, fmt.Sprintf("ilike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			case v3.FilterOperatorNotILike:
				conditions = append(conditions, fmt.Sprintf("notILike(JSONExtractString(labels, '%s'), %s)", item.Key.Key, fmtVal))
			default:
				return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported filter operator")
			}
		}
	}
	whereClause := strings.Join(conditions, " AND ")

	var selectLabels string
	for _, tag := range groupTags {
		selectLabels += fmt.Sprintf("JSONExtractString(labels, '%s') as %s, ", tag.Key, utils.AddBackTickToFormatTag(tag.Key))
	}

	// The table JOIN key always exists
	selectLabels += "fingerprint"

	filterSubQuery := fmt.Sprintf(
		"SELECT DISTINCT %s FROM %s.%s WHERE %s",
		selectLabels,
		constants.SIGNOZ_METRIC_DBNAME,
		tableName,
		whereClause,
	)

	return filterSubQuery, nil
}
// PrepareTimeseriesFilterQueryV3 builds the sub-query to be used for filtering timeseries based on the search criteria
func PrepareTimeseriesFilterQueryV3(start, end int64, mq *v3.BuilderQuery) (string, error) {
var conditions []string