Compare commits

16 Commits

Author | SHA1 | Message | Date
Prashant Shahi | bb2a60565b | Merge branch 'main' into chore/sync-develop | 2024-12-18 18:59:20 +05:30
Prashant Shahi | 823f84f857 | Merge pull request #6664 from SigNoz/release/v0.64.x (Release/v0.64.x) | 2024-12-18 18:29:05 +05:30
Prashant Shahi | 8a4d45084d | chore(signoz): pin versions: SigNoz 0.64.0 (Signed-off-by: Prashant Shahi <prashant@signoz.io>) | 2024-12-18 17:57:42 +05:30
Prashant Shahi | 5bc6c33899 | Merge branch 'main' into release/v0.64.x | 2024-12-18 17:55:57 +05:30
Prashant Shahi | 73fc5e45e5 | Merge branch 'develop' into chore/sync-develop | 2024-12-18 17:53:50 +05:30
Prashant Shahi | 799c33ff82 | Merge branch 'main' into chore/sync-develop | 2024-12-18 17:46:44 +05:30
Prashant Shahi | 46bc7c7a21 | Merge pull request #6662 from SigNoz/release/v0.63.x (Release/v0.63.x) | 2024-12-18 15:41:24 +05:30
Prashant Shahi | 6d9741c3a4 | chore(signoz): pin versions: SigNoz 0.63.0, SigNoz OtelCollector 0.111.16 (Signed-off-by: Prashant Shahi <prashant@signoz.io>) | 2024-12-18 15:25:20 +05:30
Prashant Shahi | 610a8ec704 | Merge branch 'main' into release/v0.63.x | 2024-12-18 15:07:57 +05:30
Prashant Shahi | e1b8205a16 | Merge branch 'develop' into chore/sync-develop | 2024-12-13 18:36:01 +05:30
Prashant Shahi | e21b9a561d | chore: deprecate develop branch - use main (Signed-off-by: Prashant Shahi <prashant@signoz.io>) | 2024-12-13 18:08:36 +05:30
Prashant Shahi | c4858ec829 | Merge branch 'main' into chore/sync-develop | 2024-12-13 17:46:01 +05:30
Prashant Shahi | 2b5a0ec496 | Merge pull request #6625 from SigNoz/release/v0.62.x (Release/v0.62.x) | 2024-12-12 21:02:17 +05:30
Prashant Shahi | a9440c010c | chore(signoz): pin versions: SigNoz 0.62.0, SigNoz OtelCollector 0.111.15 (Signed-off-by: Prashant Shahi <prashant@signoz.io>) | 2024-12-12 15:28:09 +05:30
Prashant Shahi | f9e7eff357 | Merge branch 'main' into release/v0.62.x | 2024-12-12 15:22:47 +05:30
Prashant Shahi | 47d8c9e3e7 | Merge pull request #6593 from SigNoz/release-sync/v0.61.x (Release Sync/v0.61.x) | 2024-12-04 21:28:47 +05:30
27 changed files with 60 additions and 125 deletions

View File

@@ -3,7 +3,6 @@ name: build-pipeline
 on:
   pull_request:
     branches:
-      - develop
       - main
       - release/v*

View File

@@ -3,7 +3,7 @@ name: "Update PR labels and Block PR until related docs are shipped for the feature"
 on:
   pull_request:
     branches:
-      - develop
+      - main
     types: [opened, edited, labeled, unlabeled]
 permissions:

View File

@@ -42,7 +42,7 @@ jobs:
         kubectl create ns sample-application
         # apply hotrod k8s manifest file
-        kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+        kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
         # wait for all deployments in sample-application namespace to be READY
         kubectl -n sample-application get deploy --output name | xargs -r -n1 -t kubectl -n sample-application rollout status --timeout=300s

View File

@@ -2,7 +2,8 @@ name: Jest Coverage - changed files
 on:
   pull_request:
-    branches: develop
+    branches:
+      - main
 jobs:
   build:
@@ -11,7 +12,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v4
         with:
-          ref: "refs/heads/develop"
+          ref: "refs/heads/main"
           token: ${{ secrets.GITHUB_TOKEN }} # Provide the GitHub token for authentication
       - name: Fetch branch

View File

@@ -4,7 +4,6 @@ on:
   push:
     branches:
       - main
-      - develop
   tags:
     - v*

View File

@@ -3,7 +3,6 @@ on:
   pull_request:
     branches:
       - main
-      - develop
     paths:
       - 'frontend/**'
 defaults:

View File

@@ -1,12 +1,12 @@
 name: staging-deployment
-# Trigger deployment only on push to develop branch
+# Trigger deployment only on push to main branch
 on:
   push:
     branches:
-      - develop
+      - main
 jobs:
   deploy:
-    name: Deploy latest develop branch to staging
+    name: Deploy latest main branch to staging
     runs-on: ubuntu-latest
     environment: staging
     permissions:

View File

@@ -44,7 +44,7 @@ jobs:
       git add .
       git stash push -m "stashed on $(date --iso-8601=seconds)"
       git fetch origin
-      git checkout develop
+      git checkout main
       git pull
       # This is added to include the scenerio when new commit in PR is force-pushed
       git branch -D ${GITHUB_BRANCH}

View File

@@ -339,7 +339,7 @@ to make SigNoz UI available at [localhost:3301](http://localhost:3301)
 **5.1.1 To install the HotROD sample app:**
 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh \
 | HELM_RELEASE=my-release SIGNOZ_NAMESPACE=platform bash
 ```
@@ -362,7 +362,7 @@ kubectl -n sample-application run strzal --image=djbingham/curl \
 **5.1.4 To delete the HotROD sample app:**
 ```bash
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh \
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh \
 | HOTROD_NAMESPACE=sample-application bash
 ```

View File

@@ -58,7 +58,7 @@ from the HotROD application, you should see the data generated from hotrod in SigNoz UI
 ```sh
 kubectl create ns sample-application
-kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
+kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/main/sample-apps/hotrod/hotrod.yaml
 ```
 To generate load:

View File

@@ -146,7 +146,7 @@ services:
       condition: on-failure
   query-service:
-    image: signoz/query-service:0.61.0
+    image: signoz/query-service:0.64.0
     command:
       [
         "-config=/root/config/prometheus.yml",
@@ -187,7 +187,7 @@ services:
     <<: *db-depend
   frontend:
-    image: signoz/frontend:0.61.0
+    image: signoz/frontend:0.64.0
     deploy:
       restart_policy:
         condition: on-failure
@@ -200,7 +200,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
   otel-collector:
-    image: signoz/signoz-otel-collector:0.111.14
+    image: signoz/signoz-otel-collector:0.111.16
     command:
       [
         "--config=/etc/otel-collector-config.yaml",
@@ -238,7 +238,7 @@ services:
       - query-service
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:0.111.14
+    image: signoz/signoz-schema-migrator:0.111.16
     deploy:
       restart_policy:
         condition: on-failure

View File

@@ -69,7 +69,7 @@ services:
       - --storage.path=/data
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator
     command:
       - "sync"
@@ -86,7 +86,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   otel-collector:
     container_name: signoz-otel-collector
-    image: signoz/signoz-otel-collector:0.111.14
+    image: signoz/signoz-otel-collector:0.111.16
     command:
       [
         "--config=/etc/otel-collector-config.yaml",

View File

@@ -162,7 +162,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.61.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.64.0}
     container_name: signoz-query-service
     command:
       [
@@ -202,7 +202,7 @@ services:
     <<: *db-depend
   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.61.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.64.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -214,7 +214,7 @@ services:
       - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
   otel-collector-migrator-sync:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator-sync
     command:
       - "sync"
@@ -229,7 +229,7 @@ services:
       #   condition: service_healthy
   otel-collector-migrator-async:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator-async
     command:
       - "async"
@@ -246,7 +246,7 @@ services:
       #   condition: service_healthy
   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16}
     container_name: signoz-otel-collector
     command:
       [

View File

@@ -167,7 +167,7 @@ services:
   # Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
   query-service:
-    image: signoz/query-service:${DOCKER_TAG:-0.61.0}
+    image: signoz/query-service:${DOCKER_TAG:-0.64.0}
     container_name: signoz-query-service
     command:
       [
@@ -209,7 +209,7 @@ services:
     <<: *db-depend
   frontend:
-    image: signoz/frontend:${DOCKER_TAG:-0.61.0}
+    image: signoz/frontend:${DOCKER_TAG:-0.64.0}
     container_name: signoz-frontend
     restart: on-failure
     depends_on:
@@ -221,7 +221,7 @@ services:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
   otel-collector-migrator:
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.111.16}
     container_name: otel-migrator
     command:
       - "--dsn=tcp://clickhouse:9000"
@@ -235,7 +235,7 @@ services:
   otel-collector:
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.14}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.111.16}
     container_name: signoz-otel-collector
     command:
       [

View File

@@ -13,8 +13,3 @@ if [ "$branch" = "main" ]; then
   echo "${color_red}${bold}You can't commit directly to the main branch${reset}"
   exit 1
 fi
-if [ "$branch" = "develop" ]; then
-  echo "${color_red}${bold}You can't commit directly to the develop branch${reset}"
-  exit 1
-fi

go.mod

View File

@@ -8,7 +8,7 @@ require (
 	github.com/ClickHouse/clickhouse-go/v2 v2.25.0
 	github.com/DATA-DOG/go-sqlmock v1.5.2
 	github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
-	github.com/SigNoz/signoz-otel-collector v0.111.14
+	github.com/SigNoz/signoz-otel-collector v0.111.16
 	github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974
 	github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974
 	github.com/antonmedv/expr v1.15.3

go.sum

View File

@@ -70,8 +70,8 @@ github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd h1:Bk43AsDYe0fhkb
 github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd/go.mod h1:nxRcH/OEdM8QxzH37xkGzomr1O0JpYBRS6pwjsWW6Pc=
 github.com/SigNoz/prometheus v1.12.0 h1:+BXeIHyMOOWWa+xjhJ+x80JFva7r1WzWIfIhQ5PUmIE=
 github.com/SigNoz/prometheus v1.12.0/go.mod h1:EqNM27OwmPfqMUk+E+XG1L9rfDFcyXnzzDrg0EPOfxA=
-github.com/SigNoz/signoz-otel-collector v0.111.14 h1:nvRucNK/TTtZKM3Dsr/UNx+LwkjaGwx0yPlMvGw/4j0=
-github.com/SigNoz/signoz-otel-collector v0.111.14/go.mod h1:vRDT10om89DHybN7SRMlt8IN9+/pgh1D57pNHPr2LM4=
+github.com/SigNoz/signoz-otel-collector v0.111.16 h1:535uKH5Oux+35EsI+L3C6pnAP/Ye0PTCbVizXoL+VqE=
+github.com/SigNoz/signoz-otel-collector v0.111.16/go.mod h1:HJ4m0LY1MPsuZmuRF7Ixb+bY8rxgRzI0VXzOedESsjg=
 github.com/SigNoz/zap_otlp v0.1.0 h1:T7rRcFN87GavY8lDGZj0Z3Xv6OhJA6Pj3I9dNPmqvRc=
 github.com/SigNoz/zap_otlp v0.1.0/go.mod h1:lcHvbDbRgvDnPxo9lDlaL1JK2PyOyouP/C3ynnYIvyo=
 github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974 h1:PKVgdf83Yw+lZJbFtNGBgqXiXNf3+kOXW2qZ7Ms7OaY=

View File

@@ -6,8 +6,6 @@ import (
 func generateConsumerSQL(start, end int64, topic, partition, consumerGroup, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_query AS (
     SELECT
@@ -20,8 +18,6 @@ WITH consumer_query AS (
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 5
         AND attribute_string_messaging$$system = '%s'
         AND attributes_string['messaging.destination.name'] = '%s'
@@ -40,15 +36,13 @@ FROM
     consumer_query
 ORDER BY
     resource_string_service$$name;
-`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, consumerGroup, timeRange)
+`, start, end, queueType, topic, partition, consumerGroup, timeRange)
 	return query
 }
 // S1 landing
 func generatePartitionLatencySQL(start, end int64, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH partition_query AS (
     SELECT
@@ -60,8 +54,6 @@ WITH partition_query AS (
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 4
         AND attribute_string_messaging$$system = '%s'
     GROUP BY topic, partition
@@ -76,15 +68,13 @@ FROM
     partition_query
 ORDER BY
     topic;
-`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange)
+`, start, end, queueType, timeRange)
 	return query
 }
 // S1 consumer
 func generateConsumerPartitionLatencySQL(start, end int64, topic, partition, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_pl AS (
     SELECT
@@ -97,8 +87,6 @@ WITH consumer_pl AS (
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 5
         AND attribute_string_messaging$$system = '%s'
         AND attributes_string['messaging.destination.name'] = '%s'
@@ -116,15 +104,14 @@ FROM
     consumer_pl
 ORDER BY
     consumer_group;
-`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, timeRange)
+`, start, end, queueType, topic, partition, timeRange)
 	return query
 }
 // S3, producer overview
 func generateProducerPartitionThroughputSQL(start, end int64, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000 // t, svc, rps, byte*, p99, err
+	// t, svc, rps, byte*, p99, err
 	query := fmt.Sprintf(`
 WITH producer_latency AS (
     SELECT
@@ -137,8 +124,6 @@ WITH producer_latency AS (
     WHERE
         timestamp >= '%d'
        AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 4
         AND attribute_string_messaging$$system = '%s'
     GROUP BY topic, resource_string_service$$name
@@ -152,15 +137,13 @@ SELECT
     COALESCE(total_requests / %d, 0) AS throughput
 FROM
     producer_latency
-`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange)
+`, start, end, queueType, timeRange)
 	return query
 }
 // S3, producer topic/service overview
 func generateProducerTopicLatencySQL(start, end int64, topic, service, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_latency AS (
     SELECT
@@ -172,8 +155,6 @@ WITH consumer_latency AS (
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 4
         AND resource_string_service$$name = '%s'
         AND attribute_string_messaging$$system = '%s'
@@ -188,15 +169,13 @@ SELECT
     COALESCE(total_requests / %d, 0) AS throughput
 FROM
     consumer_latency
-`, start, end, tsBucketStart, tsBucketEnd, service, queueType, topic, timeRange)
+`, start, end, service, queueType, topic, timeRange)
 	return query
 }
 // S3 consumer overview
 func generateConsumerLatencySQL(start, end int64, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_latency AS (
     SELECT
@@ -210,8 +189,6 @@ WITH consumer_latency AS (
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 5
         AND attribute_string_messaging$$system = '%s'
     GROUP BY topic, resource_string_service$$name
@@ -228,15 +205,13 @@ FROM
     consumer_latency
 ORDER BY
     topic;
-`, start, end, tsBucketStart, tsBucketEnd, queueType, timeRange, timeRange)
+`, start, end, queueType, timeRange, timeRange)
 	return query
 }
 // S3 consumer topic/service
 func generateConsumerServiceLatencySQL(start, end int64, topic, service, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH consumer_latency AS (
     SELECT
@@ -248,8 +223,6 @@ WITH consumer_latency AS (
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 5
         AND resource_string_service$$name = '%s'
         AND attribute_string_messaging$$system = '%s'
@@ -264,7 +237,7 @@ SELECT
     COALESCE(total_requests / %d, 0) AS throughput
 FROM
     consumer_latency
-`, start, end, tsBucketStart, tsBucketEnd, service, queueType, topic, timeRange)
+`, start, end, service, queueType, topic, timeRange)
 	return query
 }
@@ -320,8 +293,6 @@ GROUP BY
 func generateProducerSQL(start, end int64, topic, partition, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 WITH producer_query AS (
     SELECT
@@ -333,8 +304,6 @@ WITH producer_query AS (
     WHERE
         timestamp >= '%d'
         AND timestamp <= '%d'
-        AND ts_bucket_start >= '%d'
-        AND ts_bucket_start <= '%d'
         AND kind = 4
         AND attribute_string_messaging$$system = '%s'
         AND attributes_string['messaging.destination.name'] = '%s'
@@ -351,39 +320,33 @@ FROM
     producer_query
 ORDER BY
     resource_string_service$$name;
-`, start, end, tsBucketStart, tsBucketEnd, queueType, topic, partition, timeRange)
+`, start, end, queueType, topic, partition, timeRange)
 	return query
 }
 func generateNetworkLatencyThroughputSQL(start, end int64, consumerGroup, partitionID, queueType string) string {
 	timeRange := (end - start) / 1000000000
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 SELECT
     attributes_string['messaging.client_id'] AS client_id,
-    resources_string['service.instance.id'] AS service_instance_id,
+    attributes_string['service.instance.id'] AS service_instance_id,
     resource_string_service$$name AS service_name,
     count(*) / %d AS throughput
 FROM signoz_traces.distributed_signoz_index_v3
 WHERE
     timestamp >= '%d'
     AND timestamp <= '%d'
-    AND ts_bucket_start >= '%d'
-    AND ts_bucket_start <= '%d'
     AND kind = 5
     AND attribute_string_messaging$$system = '%s'
     AND attributes_string['messaging.kafka.consumer.group'] = '%s'
    AND attributes_string['messaging.destination.partition.id'] = '%s'
 GROUP BY service_name, client_id, service_instance_id
 ORDER BY throughput DESC
-`, timeRange, start, end, tsBucketStart, tsBucketEnd, queueType, consumerGroup, partitionID)
+`, timeRange, start, end, queueType, consumerGroup, partitionID)
 	return query
 }
 func onboardProducersSQL(start, end int64, queueType string) string {
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 SELECT
     COUNT(*) = 0 AS entries,
@@ -395,15 +358,11 @@ FROM
     signoz_traces.distributed_signoz_index_v3
 WHERE
     timestamp >= '%d'
-    AND timestamp <= '%d'
-    AND ts_bucket_start >= '%d'
-    AND ts_bucket_start <= '%d';`, queueType, start, end, tsBucketStart, tsBucketEnd)
+    AND timestamp <= '%d';`, queueType, start, end)
 	return query
 }
 func onboardConsumerSQL(start, end int64, queueType string) string {
-	tsBucketStart := (start / 1000000000) - 1800
-	tsBucketEnd := end / 1000000000
 	query := fmt.Sprintf(`
 SELECT
     COUNT(*) = 0 AS entries,
@@ -415,12 +374,10 @@ SELECT
     COUNT(IF(has(attributes_string, 'messaging.kafka.consumer.group'), 1, NULL)) = 0 AS cgroup,
     COUNT(IF(has(attributes_number, 'messaging.message.body.size'), 1, NULL)) = 0 AS bodysize,
     COUNT(IF(has(attributes_string, 'messaging.client_id'), 1, NULL)) = 0 AS clientid,
-    COUNT(IF(has(resources_string, 'service.instance.id'), 1, NULL)) = 0 AS instanceid
+    COUNT(IF(has(attributes_string, 'service.instance.id'), 1, NULL)) = 0 AS instanceid
 FROM signoz_traces.distributed_signoz_index_v3
 WHERE
     timestamp >= '%d'
-    AND timestamp <= '%d'
-    AND ts_bucket_start >= '%d'
-    AND ts_bucket_start <= '%d' ;`, queueType, start, end, tsBucketStart, tsBucketEnd)
+    AND timestamp <= '%d';`, queueType, start, end)
	return query
 }
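
Every generator above loses the same pair of `ts_bucket_start` predicates, which were derived from the request window by two now-removed lines. A minimal sketch of that arithmetic, reconstructed from the removed code (the epoch-seconds interpretation is an inference from the division by 1e9):

```go
package main

import "fmt"

// bucketBounds mirrors the removed derivation: start/end arrive as epoch
// nanoseconds, buckets are keyed by epoch seconds, and the lower bound is
// widened by 1800s so spans that began in an earlier bucket still match.
func bucketBounds(startNs, endNs int64) (tsBucketStart, tsBucketEnd int64) {
	tsBucketStart = (startNs / 1000000000) - 1800
	tsBucketEnd = endNs / 1000000000
	return tsBucketStart, tsBucketEnd
}

func main() {
	lo, hi := bucketBounds(1680066360726210000, 1680066458000000000)
	fmt.Println(lo, hi) // 1680064560 1680066458
}
```

The example inputs and outputs match the `ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458` bounds still visible in the trace-query test fixtures later in this diff.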

View File

@@ -190,7 +190,7 @@ func (q *querier) runBuilderQuery(
 				ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
 				return
 			}
-			query = strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1)
+			query = fmt.Sprintf(placeholderQuery, limitQuery)
 		} else {
 			query, err = tracesQueryBuilder(
 				start,

View File

@@ -190,7 +190,7 @@ func (q *querier) runBuilderQuery(
 				ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
 				return
 			}
-			query = strings.Replace(placeholderQuery, "#LIMIT_PLACEHOLDER", limitQuery, 1)
+			query = fmt.Sprintf(placeholderQuery, limitQuery)
 		} else {
 			query, err = tracesQueryBuilder(
 				start,
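
Both querier variants swap the same mechanism: the graph-limit subquery used to be spliced in by replacing a literal `#LIMIT_PLACEHOLDER` token, and the builder now emits a `%s` verb that `fmt.Sprintf` fills (the matching `want`-string changes appear in the tests further down). A rough side-by-side sketch, with hypothetical query strings standing in for the builder output:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	limitQuery := "SELECT `function` FROM t GROUP BY `function` ORDER BY value DESC LIMIT 10"

	// Old style: the builder emits a sentinel token, replaced once at run time.
	withToken := "SELECT ... WHERE (`function`) GLOBAL IN (#LIMIT_PLACEHOLDER) GROUP BY `function`"
	oldQuery := strings.Replace(withToken, "#LIMIT_PLACEHOLDER", limitQuery, 1)

	// New style: the builder emits a %s verb, filled with Sprintf. Note that
	// any literal '%' elsewhere in the query would now need escaping as '%%'.
	withVerb := "SELECT ... WHERE (`function`) GLOBAL IN (%s) GROUP BY `function`"
	newQuery := fmt.Sprintf(withVerb, limitQuery)

	fmt.Println(oldQuery == newQuery) // true for this input
}
```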

View File

@@ -34,8 +34,8 @@ func enrichKeyWithMetadata(key v3.AttributeKey, keys map[string]v3.AttributeKey)
 		return v
 	}
-	for _, tkey := range utils.GenerateEnrichmentKeys(key) {
-		if val, ok := keys[tkey]; ok {
+	for _, key := range utils.GenerateEnrichmentKeys(key) {
+		if val, ok := keys[key]; ok {
 			return val
 		}
 	}

View File

@@ -74,19 +74,6 @@ func getSelectLabels(groupBy []v3.AttributeKey) string {
 	return strings.Join(labels, ",")
 }
-// TODO(nitya): use the _exists columns as well in the future similar to logs
-func existsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (string, error) {
-	if key.DataType == v3.AttributeKeyDataTypeString {
-		if op == v3.FilterOperatorExists {
-			return fmt.Sprintf("%s %s ''", getColumnName(key), tracesOperatorMappingV3[v3.FilterOperatorNotEqual]), nil
-		} else {
-			return fmt.Sprintf("%s %s ''", getColumnName(key), tracesOperatorMappingV3[v3.FilterOperatorEqual]), nil
-		}
-	} else {
-		return "", fmt.Errorf("unsupported operation, exists and not exists can only be applied on custom attributes or string type columns")
-	}
-}
 func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
 	var conditions []string
@@ -123,7 +110,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
 			conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal))
 		case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
 			if item.Key.IsColumn {
-				subQuery, err := existsSubQueryForFixedColumn(item.Key, item.Operator)
+				subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(item.Key, item.Operator)
 				if err != nil {
 					return "", err
 				}
@@ -325,7 +312,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
 	}
 	if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
-		filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "#LIMIT_PLACEHOLDER)"
+		filterSubQuery = filterSubQuery + " AND " + fmt.Sprintf("(%s) GLOBAL IN (", tracesV3.GetSelectKeys(mq.AggregateOperator, mq.GroupBy)) + "%s)"
 	}
 	switch mq.AggregateOperator {
@@ -363,7 +350,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
 	case v3.AggregateOperatorCount:
 		if mq.AggregateAttribute.Key != "" {
 			if mq.AggregateAttribute.IsColumn {
-				subQuery, err := existsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
+				subQuery, err := tracesV3.ExistsSubQueryForFixedColumn(mq.AggregateAttribute, v3.FilterOperatorExists)
 				if err == nil {
 					filterSubQuery = fmt.Sprintf("%s AND %s", filterSubQuery, subQuery)
 				}

View File

@@ -265,11 +265,9 @@ func Test_buildTracesFilterQuery(t *testing.T) {
 				{Key: v3.AttributeKey{Key: "isDone", DataType: v3.AttributeKeyDataTypeBool, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
 				{Key: v3.AttributeKey{Key: "host1", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Operator: v3.FilterOperatorNotExists},
 				{Key: v3.AttributeKey{Key: "path", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
-				{Key: v3.AttributeKey{Key: "http_url", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
-				{Key: v3.AttributeKey{Key: "http.route", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag, IsColumn: true}, Operator: v3.FilterOperatorNotExists},
 			}},
 		},
-			want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND `attribute_string_path` = '' AND http_url = '' AND `attribute_string_http$$route` = ''",
+			want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND path = ''",
 		},
 	}
 	for _, tt := range tests {
@@ -685,7 +683,7 @@ func TestPrepareTracesQuery(t *testing.T) {
 			},
 		},
 			want: "SELECT attributes_string['function'] as `function`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 where " +
-				"(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `function` order by value DESC",
+				"(timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND mapContains(attributes_string, 'function') AND (`function`) GLOBAL IN (%s) group by `function` order by value DESC",
 		},
 		{
 			name: "test with limit with resources- first",
@@ -768,7 +766,7 @@
 			want: "SELECT `attribute_string_function` as `function`, serviceName as `serviceName`, toFloat64(count(distinct(name))) as value from signoz_traces.distributed_signoz_index_v3 " +
 				"where (timestamp >= '1680066360726210000' AND timestamp <= '1680066458000000000') AND (ts_bucket_start >= 1680064560 AND ts_bucket_start <= 1680066458) AND attributes_number['line'] = 100 " +
 				"AND (resource_fingerprint GLOBAL IN (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE (seen_at_ts_bucket_start >= 1680064560) AND (seen_at_ts_bucket_start <= 1680066458) " +
-				"AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (#LIMIT_PLACEHOLDER) group by `function`,`serviceName` order by value DESC",
+				"AND simpleJSONExtractString(labels, 'hostname') = 'server1' AND labels like '%hostname%server1%')) AND (`function`,`serviceName`) GLOBAL IN (%s) group by `function`,`serviceName` order by value DESC",
 		},
 	}

View File

@@ -90,7 +90,7 @@ func EnableHostsInfraMonitoring() bool {
 	return GetOrDefaultEnv("ENABLE_INFRA_METRICS", "true") == "true"
 }
-var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "false")
+var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "true")
 func IsDurationSortFeatureEnabled() bool {
 	isDurationSortFeatureEnabledStr := DurationSortFeature
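
This flips Kafka span evaluation from opt-in to opt-out. `GetOrDefaultEnv` itself is not shown in this diff; a hypothetical reimplementation of the usual pattern, just to illustrate the effect of the new default:

```go
package main

import (
	"fmt"
	"os"
)

// getOrDefaultEnv is an assumed stand-in for the repo's GetOrDefaultEnv:
// return the variable's value when set, otherwise the fallback.
func getOrDefaultEnv(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return fallback
}

func main() {
	// With the new "true" default, evaluation runs unless the operator
	// explicitly exports KAFKA_SPAN_EVAL=false.
	kafkaSpanEval := getOrDefaultEnv("KAFKA_SPAN_EVAL", "true")
	fmt.Println(kafkaSpanEval)
}
```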

View File

@@ -5,7 +5,7 @@ Follow the steps in this section to install a sample application named HotR.O.D,
 ```console
 kubectl create ns sample-application
-kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod.yaml
+kubectl -n sample-application apply -f https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod.yaml
 ```
 In case, you have installed SigNoz in namespace other than `platform` or selected Helm release name other than `my-release`, follow the steps below:
@@ -15,7 +15,7 @@ export HELM_RELEASE=my-release-2
 export SIGNOZ_NAMESPACE=platform-2
 export HOTROD_NAMESPACE=sample-application-2
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-install.sh | bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-install.sh | bash
 ```
 To delete sample application:
@@ -23,7 +23,7 @@ To delete sample application:
 ```console
 export HOTROD_NAMESPACE=sample-application-2
-curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-delete.sh | bash
+curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-delete.sh | bash
 ```
 For testing with local scripts, you can use the following commands:

View File

@@ -7,7 +7,7 @@ HOTROD_NAMESPACE=${HOTROD_NAMESPACE:-"sample-application"}
 if [[ "${HOTROD_NAMESPACE}" == "default" || "${HOTROD_NAMESPACE}" == "kube-system" || "${HOTROD_NAMESPACE}" == "platform" ]]; then
   echo "Default k8s namespace and SigNoz namespace must not be deleted"
   echo "Deleting components only"
-  kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-template.yaml)
+  kubectl delete --namespace="${HOTROD_NAMESPACE}" -f <(cat hotrod-template.yaml || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml)
 else
   echo "Delete HotROD sample app namespace ${HOTROD_NAMESPACE}"
   kubectl delete namespace "${HOTROD_NAMESPACE}"

View File

@@ -37,7 +37,7 @@ kubectl create namespace "$HOTROD_NAMESPACE" --save-config --dry-run -o yaml 2>/dev/null
 # Setup sample apps into specified namespace
 kubectl apply --namespace="${HOTROD_NAMESPACE}" -f <( \
-  (cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/develop/sample-apps/hotrod/hotrod-template.yaml) | \
+  (cat hotrod-template.yaml 2>/dev/null || curl -sL https://github.com/SigNoz/signoz/raw/main/sample-apps/hotrod/hotrod-template.yaml) | \
   HOTROD_NAMESPACE="${HOTROD_NAMESPACE}" \
   HOTROD_IMAGE="${HOTROD_IMAGE}" \
   LOCUST_IMAGE="${LOCUST_IMAGE}" \