Compare commits

18 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Srikanth Chekuri | fa90fad373 | chore: add pvcs list (#6654) | 2024-12-19 12:01:12 +00:00 |
| Srikanth Chekuri | 77420b9d3a | chore: address some gaps in k8s monitoring (#6653) | 2024-12-19 17:22:39 +05:30 |
| Prashant Shahi | cecc57e72d | Merge pull request #6668 from SigNoz/chore/deprecate-develop (chore: develop deprecation and related changes) | 2024-12-19 13:48:29 +05:30 |
| Prashant Shahi | 512adc6471 | Merge branch 'main' into chore/deprecate-develop | 2024-12-19 13:35:27 +05:30 |
| Prashant Shahi | 42fefc65be | chore: deprecate develop branch - use main (Signed-off-by: Prashant Shahi <prashant@signoz.io>) | 2024-12-19 13:33:09 +05:30 |
| Prashant Shahi | dcc659907a | chore(signoz): pin versions: SigNoz 0.64.0 (Signed-off-by: Prashant Shahi <prashant@signoz.io>) | 2024-12-19 13:33:09 +05:30 |
| Prashant Shahi | b90ed375c2 | chore(signoz): pin versions: SigNoz 0.63.0, SigNoz OtelCollector 0.111.16 (Signed-off-by: Prashant Shahi <prashant@signoz.io>) | 2024-12-19 13:33:09 +05:30 |
| Prashant Shahi | a8a3bd3f7d | chore(signoz): pin versions: SigNoz 0.62.0, SigNoz OtelCollector 0.111.15 (Signed-off-by: Prashant Shahi <prashant@signoz.io>) | 2024-12-19 13:33:09 +05:30 |
| Prashant Shahi | 823f84f857 | Merge pull request #6664 from SigNoz/release/v0.64.x (Release/v0.64.x) | 2024-12-18 18:29:05 +05:30 |
| Prashant Shahi | 8a4d45084d | chore(signoz): pin versions: SigNoz 0.64.0 (Signed-off-by: Prashant Shahi <prashant@signoz.io>) | 2024-12-18 17:57:42 +05:30 |
| Prashant Shahi | 5bc6c33899 | Merge branch 'main' into release/v0.64.x | 2024-12-18 17:55:57 +05:30 |
| Prashant Shahi | 46bc7c7a21 | Merge pull request #6662 from SigNoz/release/v0.63.x (Release/v0.63.x) | 2024-12-18 15:41:24 +05:30 |
| Prashant Shahi | 6d9741c3a4 | chore(signoz): pin versions: SigNoz 0.63.0, SigNoz OtelCollector 0.111.16 (Signed-off-by: Prashant Shahi <prashant@signoz.io>) | 2024-12-18 15:25:20 +05:30 |
| Prashant Shahi | 610a8ec704 | Merge branch 'main' into release/v0.63.x | 2024-12-18 15:07:57 +05:30 |
| Prashant Shahi | 2b5a0ec496 | Merge pull request #6625 from SigNoz/release/v0.62.x (Release/v0.62.x) | 2024-12-12 21:02:17 +05:30 |
| Prashant Shahi | a9440c010c | chore(signoz): pin versions: SigNoz 0.62.0, SigNoz OtelCollector 0.111.15 (Signed-off-by: Prashant Shahi <prashant@signoz.io>) | 2024-12-12 15:28:09 +05:30 |
| Prashant Shahi | f9e7eff357 | Merge branch 'main' into release/v0.62.x | 2024-12-12 15:22:47 +05:30 |
| Prashant Shahi | 47d8c9e3e7 | Merge pull request #6593 from SigNoz/release-sync/v0.61.x (Release Sync/v0.61.x) | 2024-12-04 21:28:47 +05:30 |
41 changed files with 1108 additions and 74 deletions

View File

@@ -3,7 +3,6 @@ name: build-pipeline
 on:
   pull_request:
     branches:
-      - develop
       - main
       - release/v*

View File

@@ -3,7 +3,7 @@ name: "Update PR labels and Block PR until related docs are shipped for the feat
 on:
   pull_request:
     branches:
-      - develop
+      - main
     types: [opened, edited, labeled, unlabeled]
 permissions:

View File

@@ -42,7 +42,7 @@ jobs:
         kubectl create ns sample-application
         # apply hotrod k8s manifest file
-        kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+        kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
         # wait for all deployments in sample-application namespace to be READY
         kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s

View File

@@ -2,7 +2,8 @@ name: Jest Coverage - changed files
 on:
   pull_request:
-    branches: develop
+    branches:
+      - main
 jobs:
   build:
@@ -11,7 +12,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
         with:
-          ref: "refs/heads/develop"
+          ref: "refs/heads/main"
          token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication
       - name: Fetch branch
- name: Fetch branch

View File

@@ -4,7 +4,6 @@ on:
   push:
     branches:
       - main
-      - develop
     tags:
       - v*

View File

@@ -3,7 +3,6 @@ on:
   pull_request:
     branches:
       - main
-      - develop
     paths:
       - 'frontend/**'
 defaults:

View File

@@ -1,12 +1,12 @@
 name: staging-deployment
-# Trigger deployment only on push to develop branch
+# Trigger deployment only on push to main branch
 on:
   push:
     branches:
-      - develop
+      - main
 jobs:
   deploy:
-    name: Deploy latest develop branch to staging
+    name: Deploy latest main branch to staging
     runs-on: ubuntu-latest
     environment: staging
     permissions:

View File

@@ -44,7 +44,7 @@ jobs:
           git add .
           git stash push -m "stashed on $(date --iso-8601=seconds)"
           git fetch origin
-          git checkout develop
+          git checkout main
           git pull
           # This is added to include the scenerio when new commit in PR is force-pushed
           git branch -D ${GITHUB_BRANCH}

View File

@@ -339,7 +339,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
 **5.1.1 To install the HotROD sample app:**
 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
   | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
 ```
@@ -362,7 +362,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
 **5.1.4 To delete the HotROD sample app:**
 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
   | HOTROD_NAMESPACE=sample-application bash
 ```

View File

@@ -58,7 +58,7 @@ from the HotROD application, you should see the data generated from hotrod in Si
 ```sh
 kubectl create ns sample-application
-kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
 ```
 To generate load:
To generate load:

View File

@@ -146,7 +146,7 @@ services:
         condition: on-failure
   query-service:
-    image: signoz/query-service:0.61.0
+    image: signoz/query-service:0.64.0
     command:
       [
         "-config=/root/config/prometheus.yml",
@@ -187,7 +187,7 @@ services:
     <<: *db-depend
   frontend:
-    image: signoz/frontend:0.61.0
+    image: signoz/frontend:0.64.0
     deploy:
       restart_policy:
         condition: on-failure
@@ -200,7 +200,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
   otel-collector:
-    image: signoz/signoz-otel-collector:0.111.14
+    image: signoz/signoz-otel-collector:0.111.16
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -238,7 +238,7 @@ services:
       - query-service
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:0.111.14
+    image: signoz/signoz-schema-migrator:0.111.16
     deploy:
       restart_policy:
         condition: on-failure

View File

@@ -69,7 +69,7 @@ services:
       - --storage.path=/data
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator
     command:
       - "sync"
@@ -86,7 +86,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.111.14
+    image: signoz/signoz-otel-collector:0.111.16
     command:
       [
         "--config=/etc/otel-collector-config.yaml",

View File

@@ -162,7 +162,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.61.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.64.0}
     container_name: signoz-query-service
     command:
       [
@@ -202,7 +202,7 @@ services:
     <<: *db-depend
   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.61.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.64.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -214,7 +214,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
   otel-collector-migrator-sync:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator-sync
     command:
       - "sync"
@@ -229,7 +229,7 @@ services:
       # condition: service_healthy
   otel-collector-migrator-async:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator-async
     command:
       - "async"
@@ -246,7 +246,7 @@ services:
       # condition: service_healthy
   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16}
     container_name: signoz-otel-collector
     command:
       [

View File

@@ -167,7 +167,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.61.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.64.0}
     container_name: signoz-query-service
     command:
       [
@@ -209,7 +209,7 @@ services:
     <<: *db-depend
   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.61.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.64.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -221,7 +221,7 @@ services:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -235,7 +235,7 @@ services:
   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16}
     container_name: signoz-otel-collector
     command:
       [

View File

@@ -13,8 +13,3 @@ if [ "$branch" = "main" ]; then
   echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
   exit 1
 fi
-if [ "$branch" = "develop" ]; then
-  echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
-  exit 1
-fi

go.mod
View File

@@ -8,7 +8,7 @@ require (
     github.com/ClickHouse/clickhouse-go/v2 v2.25.0
     github.com/DATA-DOG/go-sqlmock v1.5.2
     github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
-    github.com/SigNoz/signoz-otel-collector v0.111.14
+    github.com/SigNoz/signoz-otel-collector v0.111.16
     github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974
     github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974
     github.com/antonmedv/expr v1.15.3

go.sum
View File

@@ -70,8 +70,8 @@ github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkb
 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc=
 github.com/SigNoz/prometheus v1.12.0 h1:+BXeIHyMOOWWa+xjhJ+x80JFva7r1WzWIfIhQ5PUmIE=
 github.com/SigNoz/prometheus v1.12.0/go.mod h1:EqNM27OwmPfqMUk+E+XG1L9rfDFcyXnzzDrg0EPOfxA=
-github.com/SigNoz/signoz-otel-collector v0.111.14 h1:nvRucNK/TTtZKM3Dsr/UNx+LwkjaGwx0yPlMvGw/4j0=
-github.com/SigNoz/signoz-otel-collector v0.111.14/go.mod h1:vRDT10om89DHybN7SRMlt8IN9+/pgh1D57pNHPr2LM4=
+github.com/SigNoz/signoz-otel-collector v0.111.16 h1:535uKH5Oux+35EsI+L3C6pnAP/Ye0PTCbVizXoL+VqE=
+github.com/SigNoz/signoz-otel-collector v0.111.16/go.mod h1:HJ4m0LY1MPsuZmuRF7Ixb+bY8rxgRzI0VXzOedESsjg=
 github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc=
 github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo=
 github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY=

View File

@@ -125,6 +125,8 @@ type APIHandler struct {
     daemonsetsRepo   *inframetrics.DaemonSetsRepo
     statefulsetsRepo *inframetrics.StatefulSetsRepo
     jobsRepo         *inframetrics.JobsRepo
+    pvcsRepo         *inframetrics.PvcsRepo
 }
 type APIHandlerOpts struct {
@@ -208,6 +210,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
     daemonsetsRepo := inframetrics.NewDaemonSetsRepo(opts.Reader, querierv2)
     statefulsetsRepo := inframetrics.NewStatefulSetsRepo(opts.Reader, querierv2)
     jobsRepo := inframetrics.NewJobsRepo(opts.Reader, querierv2)
+    pvcsRepo := inframetrics.NewPvcsRepo(opts.Reader, querierv2)
     aH := &APIHandler{
         reader: opts.Reader,
@@ -237,6 +240,7 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
         daemonsetsRepo:   daemonsetsRepo,
         statefulsetsRepo: statefulsetsRepo,
         jobsRepo:         jobsRepo,
+        pvcsRepo:         pvcsRepo,
     }
     logsQueryBuilder := logsv3.PrepareLogsQuery
@@ -408,6 +412,11 @@ func (aH *APIHandler) RegisterInfraMetricsRoutes(router *mux.Router, am *AuthMid
     podsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getPodAttributeValues)).Methods(http.MethodGet)
     podsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getPodList)).Methods(http.MethodPost)
+    pvcsSubRouter := router.PathPrefix("/api/v1/pvcs").Subrouter()
+    pvcsSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getPvcAttributeKeys)).Methods(http.MethodGet)
+    pvcsSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getPvcAttributeValues)).Methods(http.MethodGet)
+    pvcsSubRouter.HandleFunc("/list", am.ViewAccess(aH.getPvcList)).Methods(http.MethodPost)
     nodesSubRouter := router.PathPrefix("/api/v1/nodes").Subrouter()
     nodesSubRouter.HandleFunc("/attribute_keys", am.ViewAccess(aH.getNodeAttributeKeys)).Methods(http.MethodGet)
     nodesSubRouter.HandleFunc("/attribute_values", am.ViewAccess(aH.getNodeAttributeValues)).Methods(http.MethodGet)

View File

@@ -544,3 +544,56 @@ func (aH *APIHandler) getJobList(w http.ResponseWriter, r *http.Request) {
     aH.Respond(w, jobList)
 }
+
+func (aH *APIHandler) getPvcList(w http.ResponseWriter, r *http.Request) {
+    ctx := r.Context()
+    req := model.VolumeListRequest{}
+    err := json.NewDecoder(r.Body).Decode(&req)
+    if err != nil {
+        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
+        return
+    }
+    pvcList, err := aH.pvcsRepo.GetPvcList(ctx, req)
+    if err != nil {
+        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
+        return
+    }
+    aH.Respond(w, pvcList)
+}
+
+func (aH *APIHandler) getPvcAttributeKeys(w http.ResponseWriter, r *http.Request) {
+    ctx := r.Context()
+    req, err := parseFilterAttributeKeyRequest(r)
+    if err != nil {
+        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
+        return
+    }
+    keys, err := aH.pvcsRepo.GetPvcAttributeKeys(ctx, *req)
+    if err != nil {
+        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
+        return
+    }
+    aH.Respond(w, keys)
+}
+
+func (aH *APIHandler) getPvcAttributeValues(w http.ResponseWriter, r *http.Request) {
+    ctx := r.Context()
+    req, err := parseFilterAttributeValueRequest(r)
+    if err != nil {
+        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
+        return
+    }
+    values, err := aH.pvcsRepo.GetPvcAttributeValues(ctx, *req)
+    if err != nil {
+        RespondError(w, &model.ApiError{Typ: model.ErrorInternal, Err: err}, nil)
+        return
+    }
+    aH.Respond(w, values)
+}

View File

@@ -89,6 +89,10 @@ func getParamsForTopJobs(req model.JobListRequest) (int64, string, string) {
     return getParamsForTopItems(req.Start, req.End)
 }
+
+func getParamsForTopVolumes(req model.VolumeListRequest) (int64, string, string) {
+    return getParamsForTopItems(req.Start, req.End)
+}
 // TODO(srikanthccv): remove this
 // What is happening here?
 // The `PrepareTimeseriesFilterQuery` uses the local time series table for sub-query because each fingerprint

View File

@@ -23,10 +23,11 @@ var (
     }
     queryNamesForNamespaces = map[string][]string{
-        "cpu":    {"A"},
-        "memory": {"D"},
+        "cpu":       {"A"},
+        "memory":    {"D"},
+        "pod_phase": {"H", "I", "J", "K"},
     }
-    namespaceQueryNames = []string{"A", "D"}
+    namespaceQueryNames = []string{"A", "D", "H", "I", "J", "K"}
     attributesKeysForNamespaces = []v3.AttributeKey{
         {Key: "k8s_namespace_name"},
@@ -307,6 +308,19 @@ func (p *NamespacesRepo) GetNamespaceList(ctx context.Context, req model.Namespa
             record.MemoryUsage = memory
         }
+        if pending, ok := row.Data["H"].(float64); ok {
+            record.CountByPhase.Pending = int(pending)
+        }
+        if running, ok := row.Data["I"].(float64); ok {
+            record.CountByPhase.Running = int(running)
+        }
+        if succeeded, ok := row.Data["J"].(float64); ok {
+            record.CountByPhase.Succeeded = int(succeeded)
+        }
+        if failed, ok := row.Data["K"].(float64); ok {
+            record.CountByPhase.Failed = int(failed)
+        }
         record.Meta = map[string]string{}
         if _, ok := namespaceAttrs[record.NamespaceName]; ok {
             record.Meta = namespaceAttrs[record.NamespaceName]

View File

@@ -17,7 +17,7 @@ import (
 var (
     metricToUseForNodes = "k8s_node_cpu_utilization"
-    nodeAttrsToEnrich = []string{"k8s_node_name", "k8s_node_uid"}
+    nodeAttrsToEnrich = []string{"k8s_node_name", "k8s_node_uid", "k8s_cluster_name"}
     k8sNodeUIDAttrKey = "k8s_node_uid"
@@ -27,13 +27,14 @@ var (
         "memory":             {"C"},
         "memory_allocatable": {"D"},
     }
-    nodeQueryNames = []string{"A", "B", "C", "D"}
+    nodeQueryNames = []string{"A", "B", "C", "D", "E", "F"}
     metricNamesForNodes = map[string]string{
         "cpu":                "k8s_node_cpu_utilization",
         "cpu_allocatable":    "k8s_node_allocatable_cpu",
         "memory":             "k8s_node_memory_usage",
         "memory_allocatable": "k8s_node_allocatable_memory",
+        "node_condition":     "k8s_node_condition_ready",
     }
 )
@@ -325,6 +326,14 @@ func (p *NodesRepo) GetNodeList(ctx context.Context, req model.NodeListRequest)
             record.NodeMemoryAllocatable = memory
         }
+        if ready, ok := row.Data["E"].(float64); ok {
+            record.CountByCondition.Ready = int(ready)
+        }
+        if notReady, ok := row.Data["F"].(float64); ok {
+            record.CountByCondition.NotReady = int(notReady)
+        }
         record.Meta = map[string]string{}
         if _, ok := nodeAttrs[record.NodeUID]; ok {
             record.Meta = nodeAttrs[record.NodeUID]

View File

@@ -109,6 +109,74 @@ var NodesTableListQuery = v3.QueryRangeParamsV3{
             SpaceAggregation: v3.SpaceAggregationSum,
             Disabled:         false,
         },
+        // node conditions - Ready
+        "E": {
+            QueryName:  "E",
+            DataSource: v3.DataSourceMetrics,
+            AggregateAttribute: v3.AttributeKey{
+                Key:      metricNamesForNodes["node_condition"],
+                DataType: v3.AttributeKeyDataTypeFloat64,
+            },
+            Temporality: v3.Unspecified,
+            Filters: &v3.FilterSet{
+                Operator: "AND",
+                Items: []v3.FilterItem{
+                    {
+                        Key: v3.AttributeKey{
+                            Key: "__value",
+                        },
+                        Operator: v3.FilterOperatorEqual,
+                        Value:    1,
+                    },
+                },
+            },
+            GroupBy: []v3.AttributeKey{
+                {
+                    Key:      k8sNodeUIDAttrKey,
+                    DataType: v3.AttributeKeyDataTypeString,
+                    Type:     v3.AttributeKeyTypeResource,
+                },
+            },
+            Expression:       "E",
+            ReduceTo:         v3.ReduceToOperatorAvg,
+            TimeAggregation:  v3.TimeAggregationAnyLast,
+            SpaceAggregation: v3.SpaceAggregationSum,
+            Disabled:         false,
+        },
+        // node conditions - NotReady
+        "F": {
+            QueryName:  "F",
+            DataSource: v3.DataSourceMetrics,
+            AggregateAttribute: v3.AttributeKey{
+                Key:      metricNamesForNodes["node_condition"],
+                DataType: v3.AttributeKeyDataTypeFloat64,
+            },
+            Temporality: v3.Unspecified,
+            Filters: &v3.FilterSet{
+                Operator: "AND",
+                Items: []v3.FilterItem{
+                    {
+                        Key: v3.AttributeKey{
+                            Key: "__value",
+                        },
+                        Operator: v3.FilterOperatorEqual,
+                        Value:    0,
+                    },
+                },
+            },
+            GroupBy: []v3.AttributeKey{
+                {
+                    Key:      k8sNodeUIDAttrKey,
+                    DataType: v3.AttributeKeyDataTypeString,
+                    Type:     v3.AttributeKeyTypeResource,
+                },
+            },
+            Expression:       "F",
+            ReduceTo:         v3.ReduceToOperatorAvg,
+            TimeAggregation:  v3.TimeAggregationAnyLast,
+            SpaceAggregation: v3.SpaceAggregationSum,
+            Disabled:         false,
+        },
     },
     PanelType: v3.PanelTypeTable,
     QueryType: v3.QueryTypeBuilder,
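Queries E and F read the same `k8s_node_condition_ready` gauge and differ only in their `__value` filter: series reporting 1 are counted as Ready, series reporting 0 as NotReady. A small illustrative mapping (the helper name is ours, not part of the diff):

```go
// nodeConditionFromValue is illustrative only: it mirrors the __value
// filters in queries E and F above. The receiver may emit other values
// (e.g. for an unknown condition), which these two queries simply skip;
// NodeCountByCondition reserves an Unknown bucket for that case.
func nodeConditionFromValue(v float64) string {
    switch v {
    case 1:
        return "Ready" // query E
    case 0:
        return "NotReady" // query F
    default:
        return "Unknown"
    }
}
```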

View File

@@ -27,6 +27,7 @@ var (
         "k8s_daemonset_name",
         "k8s_job_name",
         "k8s_cronjob_name",
+        "k8s_cluster_name",
     }
     k8sPodUIDAttrKey = "k8s_pod_uid"
@@ -39,8 +40,9 @@ var (
         "memory_request": {"E", "D"},
         "memory_limit":   {"F", "D"},
         "restarts":       {"G", "A"},
+        "pod_phase":      {"H", "I", "J", "K"},
     }
-    podQueryNames = []string{"A", "B", "C", "D", "E", "F", "G"}
+    podQueryNames = []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"}
     metricNamesForPods = map[string]string{
         "cpu": "k8s_pod_cpu_utilization",
@@ -50,6 +52,7 @@ var (
         "memory_request": "k8s_pod_memory_request_utilization",
         "memory_limit":   "k8s_pod_memory_limit_utilization",
         "restarts":       "k8s_container_restarts",
+        "pod_phase":      "k8s_pod_phase",
     }
 )
@@ -365,6 +368,22 @@ func (p *PodsRepo) GetPodList(ctx context.Context, req model.PodListRequest) (mo
             record.RestartCount = int(restarts)
         }
+        if pending, ok := row.Data["H"].(float64); ok {
+            record.CountByPhase.Pending = int(pending)
+        }
+        if running, ok := row.Data["I"].(float64); ok {
+            record.CountByPhase.Running = int(running)
+        }
+        if succeeded, ok := row.Data["J"].(float64); ok {
+            record.CountByPhase.Succeeded = int(succeeded)
+        }
+        if failed, ok := row.Data["K"].(float64); ok {
+            record.CountByPhase.Failed = int(failed)
+        }
         record.Meta = map[string]string{}
         if _, ok := podAttrs[record.PodUID]; ok {
             record.Meta = podAttrs[record.PodUID]

View File

@@ -54,7 +54,7 @@ var PodsTableListQuery = v3.QueryRangeParamsV3{
             Expression:       "B",
             ReduceTo:         v3.ReduceToOperatorAvg,
             TimeAggregation:  v3.TimeAggregationAvg,
-            SpaceAggregation: v3.SpaceAggregationSum,
+            SpaceAggregation: v3.SpaceAggregationAvg,
             Disabled:         false,
         },
         // pod cpu limit utilization
@@ -80,7 +80,7 @@ var PodsTableListQuery = v3.QueryRangeParamsV3{
             Expression:       "C",
             ReduceTo:         v3.ReduceToOperatorAvg,
             TimeAggregation:  v3.TimeAggregationAvg,
-            SpaceAggregation: v3.SpaceAggregationSum,
+            SpaceAggregation: v3.SpaceAggregationAvg,
             Disabled:         false,
         },
         // pod memory utilization
@@ -132,7 +132,7 @@ var PodsTableListQuery = v3.QueryRangeParamsV3{
             Expression:       "E",
             ReduceTo:         v3.ReduceToOperatorAvg,
             TimeAggregation:  v3.TimeAggregationAvg,
-            SpaceAggregation: v3.SpaceAggregationSum,
+            SpaceAggregation: v3.SpaceAggregationAvg,
             Disabled:         false,
         },
         // pod memory limit utilization
@@ -158,7 +158,7 @@ var PodsTableListQuery = v3.QueryRangeParamsV3{
             Expression:       "F",
             ReduceTo:         v3.ReduceToOperatorAvg,
             TimeAggregation:  v3.TimeAggregationAvg,
-            SpaceAggregation: v3.SpaceAggregationSum,
+            SpaceAggregation: v3.SpaceAggregationAvg,
             Disabled:         false,
         },
         "G": {
@@ -187,6 +187,142 @@ var PodsTableListQuery = v3.QueryRangeParamsV3{
             Functions: []v3.Function{{Name: v3.FunctionNameRunningDiff}},
             Disabled:  false,
         },
+        // pod phase pending
+        "H": {
+            QueryName:  "H",
+            DataSource: v3.DataSourceMetrics,
+            AggregateAttribute: v3.AttributeKey{
+                Key:      metricNamesForPods["pod_phase"],
+                DataType: v3.AttributeKeyDataTypeFloat64,
+            },
+            Temporality: v3.Unspecified,
+            Filters: &v3.FilterSet{
+                Operator: "AND",
+                Items: []v3.FilterItem{
+                    {
+                        Key: v3.AttributeKey{
+                            Key: "__value",
+                        },
+                        Operator: v3.FilterOperatorEqual,
+                        Value:    1,
+                    },
+                },
+            },
+            GroupBy: []v3.AttributeKey{
+                {
+                    Key:      k8sPodUIDAttrKey,
+                    DataType: v3.AttributeKeyDataTypeString,
+                    Type:     v3.AttributeKeyTypeResource,
+                },
+            },
+            Expression:       "H",
+            ReduceTo:         v3.ReduceToOperatorLast,
+            TimeAggregation:  v3.TimeAggregationAnyLast,
+            SpaceAggregation: v3.SpaceAggregationCount,
+            Disabled:         false,
+        },
+        // pod phase running
+        "I": {
+            QueryName:  "I",
+            DataSource: v3.DataSourceMetrics,
+            AggregateAttribute: v3.AttributeKey{
+                Key:      metricNamesForPods["pod_phase"],
+                DataType: v3.AttributeKeyDataTypeFloat64,
+            },
+            Temporality: v3.Unspecified,
+            Filters: &v3.FilterSet{
+                Operator: "AND",
+                Items: []v3.FilterItem{
+                    {
+                        Key: v3.AttributeKey{
+                            Key: "__value",
+                        },
+                        Operator: v3.FilterOperatorEqual,
+                        Value:    2,
+                    },
+                },
+            },
+            GroupBy: []v3.AttributeKey{
+                {
+                    Key:      k8sPodUIDAttrKey,
+                    DataType: v3.AttributeKeyDataTypeString,
+                    Type:     v3.AttributeKeyTypeResource,
+                },
+            },
+            Expression:       "I",
+            ReduceTo:         v3.ReduceToOperatorLast,
+            TimeAggregation:  v3.TimeAggregationAnyLast,
+            SpaceAggregation: v3.SpaceAggregationCount,
+            Disabled:         false,
+        },
+        // pod phase succeeded
+        "J": {
+            QueryName:  "J",
+            DataSource: v3.DataSourceMetrics,
+            AggregateAttribute: v3.AttributeKey{
+                Key:      metricNamesForPods["pod_phase"],
+                DataType: v3.AttributeKeyDataTypeFloat64,
+            },
+            Temporality: v3.Unspecified,
+            Filters: &v3.FilterSet{
+                Operator: "AND",
+                Items: []v3.FilterItem{
+                    {
+                        Key: v3.AttributeKey{
+                            Key: "__value",
+                        },
+                        Operator: v3.FilterOperatorEqual,
+                        Value:    3,
+                    },
+                },
+            },
+            GroupBy: []v3.AttributeKey{
+                {
+                    Key:      k8sPodUIDAttrKey,
+                    DataType: v3.AttributeKeyDataTypeString,
+                    Type:     v3.AttributeKeyTypeResource,
+                },
+            },
+            Expression:       "J",
+            ReduceTo:         v3.ReduceToOperatorLast,
+            TimeAggregation:  v3.TimeAggregationAnyLast,
+            SpaceAggregation: v3.SpaceAggregationCount,
+            Disabled:         false,
+        },
+        // pod phase failed
+        "K": {
+            QueryName:  "K",
+            DataSource: v3.DataSourceMetrics,
+            AggregateAttribute: v3.AttributeKey{
+                Key:      metricNamesForPods["pod_phase"],
+                DataType: v3.AttributeKeyDataTypeFloat64,
+            },
+            Temporality: v3.Unspecified,
+            Filters: &v3.FilterSet{
+                Operator: "AND",
+                Items: []v3.FilterItem{
+                    {
+                        Key: v3.AttributeKey{
+                            Key: "__value",
+                        },
+                        Operator: v3.FilterOperatorEqual,
+                        Value:    4,
+                    },
+                },
+            },
+            GroupBy: []v3.AttributeKey{
+                {
+                    Key:      k8sPodUIDAttrKey,
+                    DataType: v3.AttributeKeyDataTypeString,
+                    Type:     v3.AttributeKeyTypeResource,
+                },
+            },
+            Expression:       "K",
+            ReduceTo:         v3.ReduceToOperatorLast,
+            TimeAggregation:  v3.TimeAggregationAnyLast,
+            SpaceAggregation: v3.SpaceAggregationCount,
+            Disabled:         false,
+        },
     },
     PanelType: v3.PanelTypeTable,
     QueryType: v3.QueryTypeBuilder,
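Queries H through K fan the single `k8s_pod_phase` gauge out into per-phase counts by filtering on `__value`; the numeric encoding (1 = Pending, 2 = Running, 3 = Succeeded, 4 = Failed) matches the OpenTelemetry k8s cluster receiver. An illustrative mapping of the four filters:

```go
// Illustrative only: mirrors the __value filters in queries H-K above.
var podPhaseByValue = map[float64]string{
    1: "Pending",   // query H
    2: "Running",   // query I
    3: "Succeeded", // query J
    4: "Failed",    // query K
}
```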

View File

@@ -0,0 +1,378 @@
package inframetrics

import (
    "context"
    "math"
    "sort"

    "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
    "go.signoz.io/signoz/pkg/query-service/common"
    "go.signoz.io/signoz/pkg/query-service/interfaces"
    "go.signoz.io/signoz/pkg/query-service/model"
    v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
    "go.signoz.io/signoz/pkg/query-service/postprocess"
    "golang.org/x/exp/slices"
)

var (
    metricToUseForVolumes = "k8s_volume_available"

    volumeAttrsToEnrich = []string{
        "k8s_pod_uid",
        "k8s_pod_name",
        "k8s_namespace_name",
        "k8s_node_name",
        "k8s_statefulset_name",
        "k8s_cluster_name",
        "k8s_persistentvolumeclaim_name",
    }

    k8sPersistentVolumeClaimNameAttrKey = "k8s_persistentvolumeclaim_name"

    queryNamesForVolumes = map[string][]string{
        "available":   {"A"},
        "capacity":    {"B", "A"},
        "usage":       {"F1", "B", "A"},
        "inodes":      {"C", "A"},
        "inodes_free": {"D", "A"},
        "inodes_used": {"E", "A"},
    }
    volumeQueryNames = []string{"A", "B", "C", "D", "E", "F1"}

    metricNamesForVolumes = map[string]string{
        "available":   "k8s_volume_available",
        "capacity":    "k8s_volume_capacity",
        "inodes":      "k8s_volume_inodes",
        "inodes_free": "k8s_volume_inodes_free",
        "inodes_used": "k8s_volume_inodes_used",
    }
)

type PvcsRepo struct {
    reader    interfaces.Reader
    querierV2 interfaces.Querier
}

func NewPvcsRepo(reader interfaces.Reader, querierV2 interfaces.Querier) *PvcsRepo {
    return &PvcsRepo{reader: reader, querierV2: querierV2}
}

func (p *PvcsRepo) GetPvcAttributeKeys(ctx context.Context, req v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error) {
    req.DataSource = v3.DataSourceMetrics
    req.AggregateAttribute = metricToUseForVolumes
    if req.Limit == 0 {
        req.Limit = 50
    }
    attributeKeysResponse, err := p.reader.GetMetricAttributeKeys(ctx, &req)
    if err != nil {
        return nil, err
    }
    // TODO(srikanthccv): only return resource attributes when we have a way to
    // distinguish between resource attributes and other attributes.
    filteredKeys := []v3.AttributeKey{}
    for _, key := range attributeKeysResponse.AttributeKeys {
        if slices.Contains(pointAttrsToIgnore, key.Key) {
            continue
        }
        filteredKeys = append(filteredKeys, key)
    }
    return &v3.FilterAttributeKeyResponse{AttributeKeys: filteredKeys}, nil
}

func (p *PvcsRepo) GetPvcAttributeValues(ctx context.Context, req v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error) {
    req.DataSource = v3.DataSourceMetrics
    req.AggregateAttribute = metricToUseForVolumes
    if req.Limit == 0 {
        req.Limit = 50
    }
    attributeValuesResponse, err := p.reader.GetMetricAttributeValues(ctx, &req)
    if err != nil {
        return nil, err
    }
    return attributeValuesResponse, nil
}

func (p *PvcsRepo) getMetadataAttributes(ctx context.Context, req model.VolumeListRequest) (map[string]map[string]string, error) {
    volumeAttrs := map[string]map[string]string{}
    for _, key := range volumeAttrsToEnrich {
        hasKey := false
        for _, groupByKey := range req.GroupBy {
            if groupByKey.Key == key {
                hasKey = true
                break
            }
        }
        if !hasKey {
            req.GroupBy = append(req.GroupBy, v3.AttributeKey{Key: key})
        }
    }
    mq := v3.BuilderQuery{
        DataSource: v3.DataSourceMetrics,
        AggregateAttribute: v3.AttributeKey{
            Key:      metricToUseForVolumes,
            DataType: v3.AttributeKeyDataTypeFloat64,
        },
        Temporality: v3.Unspecified,
        GroupBy:     req.GroupBy,
    }
    query, err := helpers.PrepareTimeseriesFilterQuery(req.Start, req.End, &mq)
    if err != nil {
        return nil, err
    }
    query = localQueryToDistributedQuery(query)
    attrsListResponse, err := p.reader.GetListResultV3(ctx, query)
    if err != nil {
        return nil, err
    }
    for _, row := range attrsListResponse {
        stringData := map[string]string{}
        for key, value := range row.Data {
            if str, ok := value.(string); ok {
                stringData[key] = str
            } else if strPtr, ok := value.(*string); ok {
                stringData[key] = *strPtr
            }
        }
        volumeName := stringData[k8sPersistentVolumeClaimNameAttrKey]
        if _, ok := volumeAttrs[volumeName]; !ok {
            volumeAttrs[volumeName] = map[string]string{}
        }
        for _, key := range req.GroupBy {
            volumeAttrs[volumeName][key.Key] = stringData[key.Key]
        }
    }
    return volumeAttrs, nil
}

func (p *PvcsRepo) getTopVolumeGroups(ctx context.Context, req model.VolumeListRequest, q *v3.QueryRangeParamsV3) ([]map[string]string, []map[string]string, error) {
    step, timeSeriesTableName, samplesTableName := getParamsForTopVolumes(req)
    queryNames := queryNamesForVolumes[req.OrderBy.ColumnName]
    topVolumeGroupsQueryRangeParams := &v3.QueryRangeParamsV3{
        Start: req.Start,
        End:   req.End,
        Step:  step,
        CompositeQuery: &v3.CompositeQuery{
            BuilderQueries: map[string]*v3.BuilderQuery{},
            QueryType:      v3.QueryTypeBuilder,
            PanelType:      v3.PanelTypeTable,
        },
    }
    for _, queryName := range queryNames {
        query := q.CompositeQuery.BuilderQueries[queryName].Clone()
        query.StepInterval = step
        query.MetricTableHints = &v3.MetricTableHints{
            TimeSeriesTableName: timeSeriesTableName,
            SamplesTableName:    samplesTableName,
        }
        if req.Filters != nil && len(req.Filters.Items) > 0 {
            if query.Filters == nil {
                query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
            }
            query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
        }
        topVolumeGroupsQueryRangeParams.CompositeQuery.BuilderQueries[queryName] = query
    }
    queryResponse, _, err := p.querierV2.QueryRange(ctx, topVolumeGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
    formattedResponse, err := postprocess.PostProcessResult(queryResponse, topVolumeGroupsQueryRangeParams)
    if err != nil {
        return nil, nil, err
    }
    if len(formattedResponse) == 0 || len(formattedResponse[0].Series) == 0 {
        return nil, nil, nil
    }
    if req.OrderBy.Order == v3.DirectionDesc {
        sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
            return formattedResponse[0].Series[i].Points[0].Value > formattedResponse[0].Series[j].Points[0].Value
        })
    } else {
        sort.Slice(formattedResponse[0].Series, func(i, j int) bool {
            return formattedResponse[0].Series[i].Points[0].Value < formattedResponse[0].Series[j].Points[0].Value
        })
    }
    limit := math.Min(float64(req.Offset+req.Limit), float64(len(formattedResponse[0].Series)))
    paginatedTopVolumeGroupsSeries := formattedResponse[0].Series[req.Offset:int(limit)]
    topVolumeGroups := []map[string]string{}
    for _, series := range paginatedTopVolumeGroupsSeries {
        topVolumeGroups = append(topVolumeGroups, series.Labels)
    }
    allVolumeGroups := []map[string]string{}
    for _, series := range formattedResponse[0].Series {
        allVolumeGroups = append(allVolumeGroups, series.Labels)
    }
    return topVolumeGroups, allVolumeGroups, nil
}

func (p *PvcsRepo) GetPvcList(ctx context.Context, req model.VolumeListRequest) (model.VolumeListResponse, error) {
    resp := model.VolumeListResponse{}
    if req.Limit == 0 {
        req.Limit = 10
    }
    if req.OrderBy == nil {
        req.OrderBy = &v3.OrderBy{ColumnName: "usage", Order: v3.DirectionDesc}
    }
    if req.GroupBy == nil {
        req.GroupBy = []v3.AttributeKey{{Key: k8sPersistentVolumeClaimNameAttrKey}}
        resp.Type = model.ResponseTypeList
    } else {
        resp.Type = model.ResponseTypeGroupedList
    }
    step := int64(math.Max(float64(common.MinAllowedStepInterval(req.Start, req.End)), 60))
    query := PvcsTableListQuery.Clone()
    query.Start = req.Start
    query.End = req.End
    query.Step = step
    for _, query := range query.CompositeQuery.BuilderQueries {
        query.StepInterval = step
        if req.Filters != nil && len(req.Filters.Items) > 0 {
            if query.Filters == nil {
                query.Filters = &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{}}
            }
            query.Filters.Items = append(query.Filters.Items, req.Filters.Items...)
        }
        query.GroupBy = req.GroupBy
    }
    volumeAttrs, err := p.getMetadataAttributes(ctx, req)
    if err != nil {
        return resp, err
    }
    topVolumeGroups, allVolumeGroups, err := p.getTopVolumeGroups(ctx, req, query)
    if err != nil {
        return resp, err
    }
    groupFilters := map[string][]string{}
    for _, topVolumeGroup := range topVolumeGroups {
        for k, v := range topVolumeGroup {
            groupFilters[k] = append(groupFilters[k], v)
        }
    }
    for groupKey, groupValues := range groupFilters {
        hasGroupFilter := false
        if req.Filters != nil && len(req.Filters.Items) > 0 {
            for _, filter := range req.Filters.Items {
                if filter.Key.Key == groupKey {
                    hasGroupFilter = true
                    break
                }
            }
        }
        if !hasGroupFilter {
            for _, query := range query.CompositeQuery.BuilderQueries {
                query.Filters.Items = append(query.Filters.Items, v3.FilterItem{
                    Key:      v3.AttributeKey{Key: groupKey},
                    Value:    groupValues,
                    Operator: v3.FilterOperatorIn,
                })
            }
        }
    }
    queryResponse, _, err := p.querierV2.QueryRange(ctx, query)
    if err != nil {
        return resp, err
    }
    formattedResponse, err := postprocess.PostProcessResult(queryResponse, query)
    if err != nil {
        return resp, err
    }
    records := []model.VolumeListRecord{}
    for _, result := range formattedResponse {
        for _, row := range result.Table.Rows {
            record := model.VolumeListRecord{
                VolumeUsage:      -1,
                VolumeAvailable:  -1,
                VolumeCapacity:   -1,
                VolumeInodes:     -1,
                VolumeInodesFree: -1,
                VolumeInodesUsed: -1,
                Meta:             map[string]string{},
            }
            if volumeName, ok := row.Data[k8sPersistentVolumeClaimNameAttrKey].(string); ok {
                record.PersistentVolumeClaimName = volumeName
            }
            if volumeAvailable, ok := row.Data["A"].(float64); ok {
                record.VolumeAvailable = volumeAvailable
            }
            if volumeCapacity, ok := row.Data["B"].(float64); ok {
                record.VolumeCapacity = volumeCapacity
            }
            if volumeInodes, ok := row.Data["C"].(float64); ok {
                record.VolumeInodes = volumeInodes
            }
            if volumeInodesFree, ok := row.Data["D"].(float64); ok {
                record.VolumeInodesFree = volumeInodesFree
            }
            if volumeInodesUsed, ok := row.Data["E"].(float64); ok {
                record.VolumeInodesUsed = volumeInodesUsed
            }
            record.VolumeUsage = record.VolumeCapacity - record.VolumeAvailable
            record.Meta = map[string]string{}
            if _, ok := volumeAttrs[record.PersistentVolumeClaimName]; ok {
                record.Meta = volumeAttrs[record.PersistentVolumeClaimName]
            }
            for k, v := range row.Data {
                if slices.Contains(volumeQueryNames, k) {
                    continue
                }
                if labelValue, ok := v.(string); ok {
                    record.Meta[k] = labelValue
                }
            }
            records = append(records, record)
        }
    }
    resp.Total = len(allVolumeGroups)
    resp.Records = records
    return resp, nil
}

View File

@@ -0,0 +1,204 @@
package inframetrics

import v3 "go.signoz.io/signoz/pkg/query-service/model/v3"

var PvcsTableListQuery = v3.QueryRangeParamsV3{
    CompositeQuery: &v3.CompositeQuery{
        BuilderQueries: map[string]*v3.BuilderQuery{
            // k8s.volume.available
            "A": {
                QueryName:  "A",
                DataSource: v3.DataSourceMetrics,
                AggregateAttribute: v3.AttributeKey{
                    Key:      metricNamesForVolumes["available"],
                    DataType: v3.AttributeKeyDataTypeFloat64,
                },
                Temporality: v3.Unspecified,
                Filters: &v3.FilterSet{
                    Operator: "AND",
                    Items: []v3.FilterItem{
                        {
                            Key: v3.AttributeKey{
                                Key:      k8sPersistentVolumeClaimNameAttrKey,
                                DataType: v3.AttributeKeyDataTypeString,
                                Type:     v3.AttributeKeyTypeResource,
                            },
                            Operator: v3.FilterOperatorNotEqual,
                            Value:    "",
                        },
                    },
                },
                GroupBy: []v3.AttributeKey{
                    {
                        Key:      k8sPersistentVolumeClaimNameAttrKey,
                        DataType: v3.AttributeKeyDataTypeString,
                        Type:     v3.AttributeKeyTypeResource,
                    },
                },
                Expression:       "A",
                ReduceTo:         v3.ReduceToOperatorLast,
                TimeAggregation:  v3.TimeAggregationAvg,
                SpaceAggregation: v3.SpaceAggregationSum,
                Disabled:         false,
            },
            // k8s.volume.capacity
            "B": {
                QueryName:  "B",
                DataSource: v3.DataSourceMetrics,
                AggregateAttribute: v3.AttributeKey{
                    Key:      metricNamesForVolumes["capacity"],
                    DataType: v3.AttributeKeyDataTypeFloat64,
                },
                Temporality: v3.Unspecified,
                Filters: &v3.FilterSet{
                    Operator: "AND",
                    Items: []v3.FilterItem{
                        {
                            Key: v3.AttributeKey{
                                Key:      k8sPersistentVolumeClaimNameAttrKey,
                                DataType: v3.AttributeKeyDataTypeString,
                                Type:     v3.AttributeKeyTypeResource,
                            },
                            Operator: v3.FilterOperatorNotEqual,
                            Value:    "",
                        },
                    },
                },
                GroupBy: []v3.AttributeKey{
                    {
                        Key:      k8sPersistentVolumeClaimNameAttrKey,
                        DataType: v3.AttributeKeyDataTypeString,
                        Type:     v3.AttributeKeyTypeResource,
                    },
                },
                Expression:       "B",
                ReduceTo:         v3.ReduceToOperatorLast,
                TimeAggregation:  v3.TimeAggregationAvg,
                SpaceAggregation: v3.SpaceAggregationSum,
                Disabled:         false,
            },
            "F1": {
                QueryName:  "F1",
                DataSource: v3.DataSourceMetrics,
                Expression: "B - A",
                Filters: &v3.FilterSet{
                    Operator: "AND",
                    Items:    []v3.FilterItem{},
                },
                ReduceTo: v3.ReduceToOperatorLast,
            },
            // k8s.volume.inodes
            "C": {
                QueryName:  "C",
                DataSource: v3.DataSourceMetrics,
                AggregateAttribute: v3.AttributeKey{
                    Key:      metricNamesForVolumes["inodes"],
                    DataType: v3.AttributeKeyDataTypeFloat64,
                },
                Temporality: v3.Unspecified,
                Filters: &v3.FilterSet{
                    Operator: "AND",
                    Items: []v3.FilterItem{
                        {
                            Key: v3.AttributeKey{
                                Key:      k8sPersistentVolumeClaimNameAttrKey,
                                DataType: v3.AttributeKeyDataTypeString,
                                Type:     v3.AttributeKeyTypeResource,
                            },
                            Operator: v3.FilterOperatorNotEqual,
                            Value:    "",
                        },
                    },
                },
                GroupBy: []v3.AttributeKey{
                    {
                        Key:      k8sPersistentVolumeClaimNameAttrKey,
                        DataType: v3.AttributeKeyDataTypeString,
                        Type:     v3.AttributeKeyTypeResource,
                    },
                },
                Expression:       "C",
                ReduceTo:         v3.ReduceToOperatorLast,
                TimeAggregation:  v3.TimeAggregationAvg,
                SpaceAggregation: v3.SpaceAggregationSum,
                Disabled:         false,
            },
            // k8s.volume.inodes_free
            "D": {
                QueryName:  "D",
                DataSource: v3.DataSourceMetrics,
                AggregateAttribute: v3.AttributeKey{
                    Key:      metricNamesForVolumes["inodes_free"],
                    DataType: v3.AttributeKeyDataTypeFloat64,
                },
                Temporality: v3.Unspecified,
                Filters: &v3.FilterSet{
                    Operator: "AND",
                    Items: []v3.FilterItem{
                        {
                            Key: v3.AttributeKey{
                                Key:      k8sPersistentVolumeClaimNameAttrKey,
                                DataType: v3.AttributeKeyDataTypeString,
                                Type:     v3.AttributeKeyTypeResource,
                            },
                            Operator: v3.FilterOperatorNotEqual,
                            Value:    "",
                        },
                    },
                },
                GroupBy: []v3.AttributeKey{
                    {
                        Key:      k8sPersistentVolumeClaimNameAttrKey,
                        DataType: v3.AttributeKeyDataTypeString,
                        Type:     v3.AttributeKeyTypeResource,
                    },
                },
                Expression:       "D",
                ReduceTo:         v3.ReduceToOperatorLast,
                TimeAggregation:  v3.TimeAggregationAvg,
                SpaceAggregation: v3.SpaceAggregationSum,
                Disabled:         false,
            },
            // k8s.volume.inodes_used
            "E": {
                QueryName:  "E",
                DataSource: v3.DataSourceMetrics,
                AggregateAttribute: v3.AttributeKey{
                    Key:      metricNamesForVolumes["inodes_used"],
                    DataType: v3.AttributeKeyDataTypeFloat64,
                },
                Temporality: v3.Unspecified,
                Filters: &v3.FilterSet{
                    Operator: "AND",
                    Items: []v3.FilterItem{
                        {
                            Key: v3.AttributeKey{
                                Key:      k8sPersistentVolumeClaimNameAttrKey,
                                DataType: v3.AttributeKeyDataTypeString,
                                Type:     v3.AttributeKeyTypeResource,
                            },
                            Operator: v3.FilterOperatorNotEqual,
                            Value:    "",
                        },
                    },
                },
                GroupBy: []v3.AttributeKey{
                    {
                        Key:      k8sPersistentVolumeClaimNameAttrKey,
                        DataType: v3.AttributeKeyDataTypeString,
                        Type:     v3.AttributeKeyTypeResource,
                    },
                },
                Expression:       "E",
                ReduceTo:         v3.ReduceToOperatorLast,
                TimeAggregation:  v3.TimeAggregationAvg,
                SpaceAggregation: v3.SpaceAggregationSum,
                Disabled:         false,
            },
        },
        PanelType: v3.PanelTypeTable,
        QueryType: v3.QueryTypeBuilder,
    },
    Version:      "v4",
    FormatForWeb: true,
}

View File

@@ -4,13 +4,13 @@ import v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
 var (
     metricNamesForWorkloads = map[string]string{
-        "cpu":          "k8s_pod_cpu_utilization",
-        "cpu_req":      "k8s_pod_cpu_request_utilization",
-        "cpu_limit":    "k8s_pod_cpu_limit_utilization",
-        "memory":       "k8s_pod_memory_usage",
-        "memory_req":   "k8s_pod_memory_request_utilization",
-        "memory_limit": "k8s_pod_memory_limit_utilization",
-        "restarts":     "k8s_container_restarts",
+        "cpu":            "k8s_pod_cpu_utilization",
+        "cpu_request":    "k8s_pod_cpu_request_utilization",
+        "cpu_limit":      "k8s_pod_cpu_limit_utilization",
+        "memory":         "k8s_pod_memory_usage",
+        "memory_request": "k8s_pod_memory_request_utilization",
+        "memory_limit":   "k8s_pod_memory_limit_utilization",
+        "restarts":       "k8s_container_restarts",
     }
 )
@@ -54,7 +54,7 @@ var WorkloadTableListQuery = v3.QueryRangeParamsV3{
             Expression:       "B",
             ReduceTo:         v3.ReduceToOperatorAvg,
             TimeAggregation:  v3.TimeAggregationAvg,
-            SpaceAggregation: v3.SpaceAggregationSum,
+            SpaceAggregation: v3.SpaceAggregationAvg,
             Disabled:         false,
         },
         // pod cpu limit utilization
@@ -74,7 +74,7 @@ var WorkloadTableListQuery = v3.QueryRangeParamsV3{
             Expression:       "C",
             ReduceTo:         v3.ReduceToOperatorAvg,
             TimeAggregation:  v3.TimeAggregationAvg,
-            SpaceAggregation: v3.SpaceAggregationSum,
+            SpaceAggregation: v3.SpaceAggregationAvg,
             Disabled:         false,
         },
         // pod memory utilization
@@ -114,7 +114,7 @@ var WorkloadTableListQuery = v3.QueryRangeParamsV3{
             Expression:       "E",
             ReduceTo:         v3.ReduceToOperatorAvg,
             TimeAggregation:  v3.TimeAggregationAvg,
-            SpaceAggregation: v3.SpaceAggregationSum,
+            SpaceAggregation: v3.SpaceAggregationAvg,
             Disabled:         false,
         },
         // pod memory limit utilization
@@ -134,7 +134,7 @@ var WorkloadTableListQuery = v3.QueryRangeParamsV3{
             Expression:       "F",
             ReduceTo:         v3.ReduceToOperatorAvg,
             TimeAggregation:  v3.TimeAggregationAvg,
-            SpaceAggregation: v3.SpaceAggregationSum,
+            SpaceAggregation: v3.SpaceAggregationAvg,
             Disabled:         false,
         },
         "G": {

View File

@@ -5,9 +5,73 @@ import (
     "reflect"
     "strings"
     v3 "go.signoz.io/signoz/pkg/query-service/model/v3"
+    "go.uber.org/zap"
 )
+
+func AddMetricValueFilter(mq *v3.BuilderQuery) *v3.MetricValueFilter {
+    var metricValueFilter *v3.MetricValueFilter = nil
+    if mq != nil && mq.Filters != nil && mq.Filters.Items != nil {
+        for _, item := range mq.Filters.Items {
+            if item.Key.Key == "__value" {
+                switch v := item.Value.(type) {
+                case float64:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: v,
+                    }
+                case float32:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: float64(v),
+                    }
+                case int:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: float64(v),
+                    }
+                case int8:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: float64(v),
+                    }
+                case int16:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: float64(v),
+                    }
+                case int32:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: float64(v),
+                    }
+                case int64:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: float64(v),
+                    }
+                case uint:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: float64(v),
+                    }
+                case uint8:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: float64(v),
+                    }
+                case uint16:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: float64(v),
+                    }
+                case uint32:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: float64(v),
+                    }
+                case uint64:
+                    metricValueFilter = &v3.MetricValueFilter{
+                        Value: float64(v),
+                    }
+                }
+            }
+        }
+    }
+    return metricValueFilter
+}

 // FormattedValue formats the value to be used in clickhouse query
 func FormattedValue(v interface{}) string {
     switch x := v.(type) {

View File

@@ -5,6 +5,7 @@ import (
     "strings"
     "time"
+    "go.signoz.io/signoz/pkg/query-service/app/metrics"
     "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/helpers"
     "go.signoz.io/signoz/pkg/query-service/common"
     "go.signoz.io/signoz/pkg/query-service/constants"
@@ -335,6 +336,10 @@ func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.P
     start, end = common.AdjustedMetricTimeRange(start, end, mq.StepInterval, *mq)
+    if valFilter := metrics.AddMetricValueFilter(mq); valFilter != nil {
+        mq.MetricValueFilter = valFilter
+    }
     // if the aggregate operator is a histogram quantile, and user has not forgotten
     // the le tag in the group by then add the le tag to the group by
     if mq.AggregateOperator == v3.AggregateOperatorHistQuant50 ||

View File

@@ -20,11 +20,16 @@ func PrepareMetricQueryCumulativeTable(start, end, step int64, mq *v3.BuilderQue
     orderBy := helpers.OrderByAttributeKeyTags(mq.OrderBy, mq.GroupBy)
     selectLabels := helpers.GroupByAttributeKeyTags(mq.GroupBy...)
+    valueFilter := " WHERE isNaN(per_series_value) = 0"
+    if mq.MetricValueFilter != nil {
+        valueFilter += fmt.Sprintf(" AND per_series_value = %f", mq.MetricValueFilter.Value)
+    }
     queryTmpl :=
         "SELECT %s," +
             " %s as value" +
             " FROM (%s)" +
-            " WHERE isNaN(per_series_value) = 0" +
+            valueFilter +
             " GROUP BY %s" +
             " ORDER BY %s"

View File

@@ -190,11 +190,16 @@ func PrepareMetricQueryCumulativeTimeSeries(start, end, step int64, mq *v3.Build
     orderBy := helpers.OrderByAttributeKeyTags(mq.OrderBy, mq.GroupBy)
     selectLabels := helpers.GroupByAttributeKeyTags(mq.GroupBy...)
+    valueFilter := " WHERE isNaN(per_series_value) = 0"
+    if mq.MetricValueFilter != nil {
+        valueFilter += fmt.Sprintf(" AND per_series_value = %f", mq.MetricValueFilter.Value)
+    }
     queryTmpl :=
         "SELECT %s," +
             " %s as value" +
             " FROM (%s)" +
-            " WHERE isNaN(per_series_value) = 0" +
+            valueFilter +
             " GROUP BY %s" +
             " ORDER BY %s"

View File

@@ -25,11 +25,16 @@ func PrepareMetricQueryDeltaTable(start, end, step int64, mq *v3.BuilderQuery) (
     orderBy := helpers.OrderByAttributeKeyTags(mq.OrderBy, mq.GroupBy)
     selectLabels := helpers.GroupByAttributeKeyTags(mq.GroupBy...)
+    valueFilter := " WHERE isNaN(per_series_value) = 0"
+    if mq.MetricValueFilter != nil {
+        valueFilter += fmt.Sprintf(" AND per_series_value = %f", mq.MetricValueFilter.Value)
+    }
     queryTmpl :=
         "SELECT %s," +
             " %s as value" +
             " FROM (%s)" +
-            " WHERE isNaN(per_series_value) = 0" +
+            valueFilter +
             " GROUP BY %s" +
             " ORDER BY %s"

View File

@@ -142,11 +142,16 @@ func PrepareMetricQueryDeltaTimeSeries(start, end, step int64, mq *v3.BuilderQue
     orderBy := helpers.OrderByAttributeKeyTags(mq.OrderBy, mq.GroupBy)
     selectLabels := helpers.GroupByAttributeKeyTags(mq.GroupBy...)
+    valueFilter := " WHERE isNaN(per_series_value) = 0"
+    if mq.MetricValueFilter != nil {
+        valueFilter += fmt.Sprintf(" AND per_series_value = %f", mq.MetricValueFilter.Value)
+    }
     queryTmpl :=
         "SELECT %s," +
             " %s as value" +
             " FROM (%s)" +
-            " WHERE isNaN(per_series_value) = 0" +
+            valueFilter +
             " GROUP BY %s" +
             " ORDER BY %s"

View File

@@ -270,6 +270,10 @@ func PrepareTimeseriesFilterQuery(start, end int64, mq *v3.BuilderQuery) (string
     if fs != nil && len(fs.Items) != 0 {
         for _, item := range fs.Items {
+            if item.Key.Key == "__value" {
+                continue
+            }
             toFormat := item.Value
             op := v3.FilterOperator(strings.ToLower(strings.TrimSpace(string(item.Operator))))
             if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains {

View File

@@ -4,6 +4,7 @@ import (
     "fmt"
     "time"
+    "go.signoz.io/signoz/pkg/query-service/app/metrics"
     metricsV3 "go.signoz.io/signoz/pkg/query-service/app/metrics/v3"
     "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/cumulative"
     "go.signoz.io/signoz/pkg/query-service/app/metrics/v4/delta"
@@ -19,6 +20,9 @@ import (
 // step is in seconds
 func PrepareMetricQuery(start, end int64, queryType v3.QueryType, panelType v3.PanelType, mq *v3.BuilderQuery, options metricsV3.Options) (string, error) {
+    if valFilter := metrics.AddMetricValueFilter(mq); valFilter != nil {
+        mq.MetricValueFilter = valFilter
+    }
     start, end = common.AdjustedMetricTimeRange(start, end, mq.StepInterval, *mq)
     var quantile float64

View File

@@ -151,13 +151,20 @@ type NodeListResponse struct {
     Total int `json:"total"`
 }
+
+type NodeCountByCondition struct {
+    Ready    int `json:"ready"`
+    NotReady int `json:"notReady"`
+    Unknown  int `json:"unknown"`
+}
+
 type NodeListRecord struct {
-    NodeUID               string            `json:"nodeUID,omitempty"`
-    NodeCPUUsage          float64           `json:"nodeCPUUsage"`
-    NodeCPUAllocatable    float64           `json:"nodeCPUAllocatable"`
-    NodeMemoryUsage       float64           `json:"nodeMemoryUsage"`
-    NodeMemoryAllocatable float64           `json:"nodeMemoryAllocatable"`
-    Meta                  map[string]string `json:"meta"`
+    NodeUID               string               `json:"nodeUID,omitempty"`
+    NodeCPUUsage          float64              `json:"nodeCPUUsage"`
+    NodeCPUAllocatable    float64              `json:"nodeCPUAllocatable"`
+    NodeMemoryUsage       float64              `json:"nodeMemoryUsage"`
+    NodeMemoryAllocatable float64              `json:"nodeMemoryAllocatable"`
+    CountByCondition      NodeCountByCondition `json:"countByCondition"`
+    Meta                  map[string]string    `json:"meta"`
 }
 type NamespaceListRequest struct {
@@ -180,6 +187,7 @@ type NamespaceListRecord struct {
     NamespaceName string          `json:"namespaceName"`
     CPUUsage      float64         `json:"cpuUsage"`
     MemoryUsage   float64         `json:"memoryUsage"`
+    CountByPhase  PodCountByPhase `json:"countByPhase"`
     Meta map[string]string `json:"meta"`
 }
@@ -329,3 +337,30 @@ type JobListRecord struct {
     SuccessfulPods int               `json:"successfulPods"`
     Meta           map[string]string `json:"meta"`
 }
+
+type VolumeListRequest struct {
+    Start   int64             `json:"start"` // epoch time in ms
+    End     int64             `json:"end"`   // epoch time in ms
+    Filters *v3.FilterSet     `json:"filters"`
+    GroupBy []v3.AttributeKey `json:"groupBy"`
+    OrderBy *v3.OrderBy       `json:"orderBy"`
+    Offset  int               `json:"offset"`
+    Limit   int               `json:"limit"`
+}
+
+type VolumeListResponse struct {
+    Type    ResponseType       `json:"type"`
+    Records []VolumeListRecord `json:"records"`
+    Total   int                `json:"total"`
+}
+
+type VolumeListRecord struct {
+    PersistentVolumeClaimName string            `json:"persistentVolumeClaimName"`
+    VolumeAvailable           float64           `json:"volumeAvailable"`
+    VolumeCapacity            float64           `json:"volumeCapacity"`
+    VolumeInodes              float64           `json:"volumeInodes"`
+    VolumeInodesFree          float64           `json:"volumeInodesFree"`
+    VolumeInodesUsed          float64           `json:"volumeInodesUsed"`
+    VolumeUsage               float64           `json:"volumeUsage"`
+    Meta                      map[string]string `json:"meta"`
+}
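For reference, a request that `GetPvcList` would accept, built against the `VolumeListRequest` shape above (the values are illustrative; when `GroupBy` is omitted the repo defaults to grouping by `k8s_persistentvolumeclaim_name` and returns a flat list):

```go
req := model.VolumeListRequest{
    Start:   1734595200000, // epoch millis
    End:     1734598800000,
    OrderBy: &v3.OrderBy{ColumnName: "usage", Order: v3.DirectionDesc},
    GroupBy: []v3.AttributeKey{{Key: "k8s_namespace_name"}},
    Offset:  0,
    Limit:   10,
}
```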

View File

@@ -770,6 +770,19 @@ type MetricTableHints struct {
     SamplesTableName   string
 }
+
+type MetricValueFilter struct {
+    Value float64
+}
+
+func (m *MetricValueFilter) Clone() *MetricValueFilter {
+    if m == nil {
+        return nil
+    }
+    return &MetricValueFilter{
+        Value: m.Value,
+    }
+}
 type BuilderQuery struct {
     QueryName    string `json:"queryName"`
     StepInterval int64  `json:"stepInterval"`
@@ -795,7 +808,8 @@ type BuilderQuery struct {
     ShiftBy              int64
     IsAnomaly            bool
     QueriesUsedInFormula []string
-    MetricTableHints     *MetricTableHints `json:"-"`
+    MetricTableHints     *MetricTableHints  `json:"-"`
+    MetricValueFilter    *MetricValueFilter `json:"-"`
 }
 func (b *BuilderQuery) SetShiftByFromFunc() {
@@ -859,6 +873,7 @@ func (b *BuilderQuery) Clone() *BuilderQuery {
         ShiftBy:              b.ShiftBy,
         IsAnomaly:            b.IsAnomaly,
         QueriesUsedInFormula: b.QueriesUsedInFormula,
+        MetricValueFilter:    b.MetricValueFilter.Clone(),
     }
 }

View File

@@ -5,7 +5,7 @@ Follow the steps in this section to install a sample application named HotR.O.D,
 ```console
 kubectl create ns sample-application
-kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod.yaml
+kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod.yaml
 ```
 In case, you have installed SigNoz in namespace other than `platform` or selected Helm release name other than `my-release`, follow the steps below:
@@ -15,7 +15,7 @@ export HELM_RELEASE=my-release-2
 export SIGNOZ_NAMESPACE=platform-2
 export HOTROD_NAMESPACE=sample-application-2
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh | bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh | bash
 ```
 To delete sample application:
@@ -23,7 +23,7 @@ To delete sample application:
 ```console
 export HOTROD_NAMESPACE=sample-application-2
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh | bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh | bash
 ```
 For testing with local scripts, you can use the following commands:

View File

@@ -7,7 +7,7 @@ HOTROD_NAMESPACE=${HOTROD_NAMESPACE:-"sample-application"}
 if [[ "${HOTROD_NAMESPACE}" == "default" || "${HOTROD_NAMESPACE}" == "kube-system" || "${HOTROD_NAMESPACE}" == "platform" ]]; then
     echo "Default k8s namespace and SigNoz namespace must not be deleted"
     echo "Deleting components only"
-    kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-template.yaml)
+    kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml)
 else
     echo "Delete HotROD sample app namespace ${HOTROD_NAMESPACE}"
     kubectl delete namespace "${HOTROD_NAMESPACE}"

View File

@@ -37,7 +37,7 @@ kubectl create namespace "$HOTROD_NAMESPACE" --save-config --dry-run -o yaml 2>/
 # Setup sample apps into specified namespace
 kubectl apply --namespace="${HOTROD_NAMESPACE}" -f <( \
-  (cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-template.yaml) | \
+  (cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml) | \
   HOTROD_NAMESPACE="${HOTROD_NAMESPACE}" \
   HOTROD_IMAGE="${HOTROD_IMAGE}" \
   LOCUST_IMAGE="${LOCUST_IMAGE}" \