Compare commits: licensing...chore/rest (28 commits)

c9f5b16c37, 55837dacb5, 14248968cb, a436ae900c, 5dd02a5b8e, c0f01e4cb9, fed84cb50a, 80545c4d07, 0b1faec092, ba6f31b1c3, eed92978a4, 41cbd316b5, 8d7d33393d, 8d143b44b1, 423aebd6eb, 8d630707af, a5b52431b7, 0138d757c8, 844195b84f, 8ff05b2e8f, c8c56c544e, 1c43655336, c269c8c6b8, 3142b6cc6d, 58e141685a, e17f63a50c, 838ef5dcc5, e53d3d1269
.github/workflows/build-community.yaml (new file, 81 lines)

```yaml
name: build-community

on:
  push:
    tags:
      - 'v[0-9]+.[0-9]+.[0-9]+'
      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_OUTPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_NAME: signoz-community
      GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./pkg/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=community
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: dockerhub
```
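The `-X` flags in `GO_BUILD_FLAGS` stamp build metadata into package-level string variables at link time. A minimal sketch of the receiving side, assuming `pkg/version` declares plain string variables matching the `-X` target paths (the actual contents of that package are not part of this diff):

```go
// Package version is a sketch of the -ldflags -X targets used above. The
// variable names mirror the -X paths (version, variant, hash, time, branch);
// -X only works on package-level string variables, not constants.
package version

var (
	version = "dev"     // -X github.com/SigNoz/signoz/pkg/version.version=v0.78.1
	variant = "unknown" // -X github.com/SigNoz/signoz/pkg/version.variant=community
	hash    = "unknown" // short commit hash from the prepare job
	time    = "unknown" // build timestamp
	branch  = "unknown" // branch name
)

// Info renders the stamped metadata, e.g. "v0.78.1 (community, c9f5b16)".
func Info() string {
	return version + " (" + variant + ", " + hash + ")"
}
```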
.github/workflows/build-enterprise.yaml (new file, 113 lines)

```yaml
name: build-enterprise

on:
  push:
    tags:
      - v*

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    outputs:
      docker_providers: ${{ steps.set-docker-providers.outputs.providers }}
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
      - name: set-docker-providers
        id: set-docker-providers
        run: |
          if [[ ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ || ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
            echo "providers=dockerhub gcp" >> $GITHUB_OUTPUT
          else
            echo "providers=gcp" >> $GITHUB_OUTPUT
          fi
      - name: create-dotenv
        run: |
          mkdir -p frontend
          echo 'CI=1' > frontend/.env
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
      - name: cache-dotenv
        uses: actions/cache@v4
        with:
          path: frontend/.env
          key: dotenv-${{ github.sha }}
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_INPUT_ARTIFACT_CACHE_KEY: dotenv-${{ github.sha }}
      JS_INPUT_ARTIFACT_PATH: frontend/.env
      JS_OUTPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_INPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./ee/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}
```
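The `set-docker-providers` step gates Docker Hub publishing on the tag shape: stable (`vX.Y.Z`) and release-candidate (`vX.Y.Z-rc.N`) tags publish to both Docker Hub and GCP, anything else goes to GCP only. The same decision, sketched in Go for clarity (the workflow itself runs the bash shown above):

```go
// Sketch of the provider gate from the set-docker-providers step.
package main

import (
	"fmt"
	"regexp"
)

// releaseTag combines the two bash regexes above into one pattern.
var releaseTag = regexp.MustCompile(`^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$`)

func providers(ref string) string {
	if releaseTag.MatchString(ref) {
		return "dockerhub gcp"
	}
	return "gcp"
}

func main() {
	fmt.Println(providers("refs/tags/v0.78.1"))      // dockerhub gcp
	fmt.Println(providers("refs/tags/v0.78.1-rc.2")) // dockerhub gcp
	fmt.Println(providers("refs/tags/v0.78.1-beta")) // gcp
}
```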
.github/workflows/build-staging.yaml (new file, 131 lines)

```yaml
name: build-staging

on:
  push:
    branches:
      - main
  pull_request:
    types: [labeled]

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    outputs:
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
      deployment: ${{ steps.build-info.outputs.deployment }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT

          staging_label="${{ github.event.label.name }}"
          if [[ "${staging_label}" == "staging:"* ]]; then
            deployment=${staging_label#"staging:"}
          elif [[ "${{ github.event.ref }}" == "refs/heads/main" ]]; then
            deployment="staging"
          else
            echo "error: not able to determine deployment - please verify the PR label or the branch"
            exit 1
          fi
          echo "deployment=${deployment}" >> $GITHUB_OUTPUT
      - name: create-dotenv
        run: |
          mkdir -p frontend
          echo 'CI=1' > frontend/.env
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
      - name: cache-dotenv
        uses: actions/cache@v4
        with:
          path: frontend/.env
          key: dotenv-${{ github.sha }}
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_INPUT_ARTIFACT_CACHE_KEY: dotenv-${{ github.sha }}
      JS_INPUT_ARTIFACT_PATH: frontend/.env
      JS_OUTPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_INPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./ee/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: gcp
  staging:
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
    secrets: inherit
    needs: [prepare, go-build]
    with:
      PRIMUS_REF: main
      GITHUB_ENVIRONMENT: staging
      GITHUB_SILENT: true
      GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
      GITHUB_EVENT_NAME: releaser
      GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"
```
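The deployment resolution in the `build-info` step boils down to a small function: a PR label of the form `staging:<name>` selects that deployment, a push to main falls back to `staging`, and anything else fails the job. A hedged Go sketch of the same logic (the workflow runs the bash above, not this code):

```go
// Sketch of the staging deployment resolution from build-staging.yaml.
package main

import (
	"errors"
	"strings"
)

func resolveDeployment(label, ref string) (string, error) {
	if strings.HasPrefix(label, "staging:") {
		// "staging:foo" selects the "foo" deployment.
		return strings.TrimPrefix(label, "staging:"), nil
	}
	if ref == "refs/heads/main" {
		// Pushes to main deploy to the default staging environment.
		return "staging", nil
	}
	return "", errors.New("not able to determine deployment - please verify the PR label or the branch")
}
```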
.github/workflows/build.yaml (deleted, 122 lines)

```yaml
name: build

on:
  push:
    branches:
      - main
    tags:
      - v*

jobs:
  enterprise:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: setup
        uses: actions/setup-go@v5
        with:
          go-version: "1.22"
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: latest
      - name: docker-login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: create-env-file
        run: |
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
      - name: github-ref-info
        shell: bash
        run: |
          GH_REF=${{ github.ref }}
          if [[ "${{ github.ref_type }}" == "tag" ]]; then
            PREFIX="refs/tags/"
            echo "GH_IS_TAG=true" >> $GITHUB_ENV
            echo "GH_TAG=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          else
            PREFIX="refs/heads/"
            echo "GH_IS_TAG=false" >> $GITHUB_ENV
            echo "GH_BRANCH_NAME=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          fi
      - name: set-version
        run: |
          if [ '${{ env.GH_IS_TAG }}' == 'true' ]; then
            echo "VERSION=${{ env.GH_TAG }}" >> $GITHUB_ENV
          elif [ '${{ env.GH_BRANCH_NAME }}' == 'main' ]; then
            echo "VERSION=latest" >> $GITHUB_ENV
          else
            echo "VERSION=${{ env.GH_BRANCH_NAME }}" >> $GITHUB_ENV
          fi
      - name: cross-compilation-tools
        run: |
          set -ex
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: publish
        run: make docker-buildx-enterprise

  community:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: "1.22"
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: latest
      - name: docker-login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: github-ref-info
        shell: bash
        run: |
          GH_REF=${{ github.ref }}
          if [[ "${{ github.ref_type }}" == "tag" ]]; then
            PREFIX="refs/tags/"
            echo "GH_IS_TAG=true" >> $GITHUB_ENV
            echo "GH_TAG=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          else
            PREFIX="refs/heads/"
            echo "GH_IS_TAG=false" >> $GITHUB_ENV
            echo "GH_BRANCH_NAME=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          fi
      - name: set-version
        run: |
          if [ '${{ env.GH_IS_TAG }}' == 'true' ]; then
            echo "VERSION=${{ env.GH_TAG }}" >> $GITHUB_ENV
          elif [ '${{ env.GH_BRANCH_NAME }}' == 'main' ]; then
            echo "VERSION=latest" >> $GITHUB_ENV
          else
            echo "VERSION=${{ env.GH_BRANCH_NAME }}" >> $GITHUB_ENV
          fi
      - name: cross-compilation-tools
        run: |
          set -ex
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: publish
        run: make docker-buildx-community
```
.github/workflows/staging-deployment.yaml (+9/−4)

```diff
@@ -36,12 +36,17 @@ jobs:
           echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
           echo "GITHUB_SHA: ${GITHUB_SHA}"
           export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
-          export OTELCOL_TAG="main"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
           export KAFKA_SPAN_EVAL="true"
-          docker system prune --force
-          docker pull signoz/signoz-otel-collector:main
-          docker pull signoz/signoz-schema-migrator:main
+          docker system prune --force --all
+          OTELCOL_TAG=$(curl -s https://api.github.com/repos/SigNoz/signoz-otel-collector/releases/latest | jq -r '.tag_name // "not-found"')
+          if [[ "${OTELCOL_TAG}" == "not-found" ]]; then
+            echo "warning: unable to determine latest SigNoz OtelCollector release tag, skipping latest otelcol deployment"
+          else
+            export OTELCOL_TAG=${OTELCOL_TAG}
+            docker pull signoz/signoz-otel-collector:${OTELCOL_TAG}
+            docker pull signoz/signoz-schema-migrator:${OTELCOL_TAG}
+          fi
           cd ~/signoz
           git status
           git add .
```
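The new collector-tag lookup replaces the pinned `main` tag with the latest GitHub release. A sketch of the same lookup in Go, mirroring the `curl ... | jq -r '.tag_name // "not-found"'` pipeline above:

```go
// Sketch of the OTELCOL_TAG resolution: fetch the latest
// signoz-otel-collector release tag, treating a missing tag_name as
// "not-found" so the caller can skip the latest-otelcol deployment.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func latestOtelcolTag() string {
	resp, err := http.Get("https://api.github.com/repos/SigNoz/signoz-otel-collector/releases/latest")
	if err != nil {
		return "not-found"
	}
	defer resp.Body.Close()

	var release struct {
		TagName string `json:"tag_name"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&release); err != nil || release.TagName == "" {
		return "not-found"
	}
	return release.TagName
}

func main() {
	fmt.Println(latestOtelcolTag())
}
```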
.github/workflows/testing-deployment.yaml (+1/−1)

```diff
@@ -38,7 +38,7 @@ jobs:
           export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
           export DEV_BUILD="1"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-          docker system prune --force
+          docker system prune --force --all
           cd ~/signoz
           git status
           git add .
```
.versions/alpine (new file, 17 lines)

```
#### Auto generated by make docker-version-alpine. DO NOT EDIT! ####
amd64=029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85
unknown=5fea95373b9ec85974843f31446fa6a9df4492dddae4e1cb056193c34a20a5be
arm=b4aef1a899e0271f06d948c9a8fa626ecdb2202d3a178bc14775dd559e23df8e
unknown=a4d1e27e63a9d6353046eb25a2f0ec02945012b217f4364cd83a73fe6dfb0b15
arm=4fdafe217d0922f3c3e2b4f64cf043f8403a4636685cd9c51fea2cbd1f419740
unknown=7f21ac2018d95b2c51a5779c1d5ca6c327504adc3b0fdc747a6725d30b3f13c2
arm64=ea3c5a9671f7b3f7eb47eab06f73bc6591df978b0d5955689a9e6f943aa368c0
unknown=a8ba68c1a9e6eea8041b4b8f996c235163440808b9654a865976fdcbede0f433
386=dea9f02e103e837849f984d5679305c758aba7fea1b95b7766218597f61a05ab
unknown=3c6629bec05c8273a927d46b77428bf4a378dad911a0ae284887becdc149b734
ppc64le=0880443bffa028dfbbc4094a32dd6b7ac25684e4c0a3d50da9e0acae355c5eaf
unknown=bb48308f976b266e3ab39bbf9af84521959bd9c295d3c763690cf41f8df2a626
riscv64=d76e6fbe348ff20c2931bb7f101e49379648e026de95dd37f96e00ce1909dcf7
unknown=dd807544365f6dc187cbe6de0806adce2ea9de3e7124717d1d8e8b7a18b77b64
s390x=b815fadf80495594eb6296a6af0bc647ae5f193e0044e07acec7e5b378c9ce2d
unknown=74681be74a280a88abb53ff1e048eb1fb624b30d0066730df6d8afd02ba82e01
```
Docker Compose manifests (file paths not captured): signoz v0.77.0 → v0.78.1; signoz-otel-collector and signoz-schema-migrator v0.111.37 → v0.111.38.

```diff
@@ -174,7 +174,7 @@ services:
       # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.77.0
+    image: signoz/signoz:v0.78.1
     command:
       - --config=/root/config/prometheus.yml
       - --use-logs-new-schema=true
@@ -208,7 +208,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.111.37
+    image: signoz/signoz-otel-collector:v0.111.38
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -232,7 +232,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.111.37
+    image: signoz/signoz-schema-migrator:v0.111.38
     deploy:
       restart_policy:
         condition: on-failure

@@ -110,7 +110,7 @@ services:
       # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.77.0
+    image: signoz/signoz:v0.78.1
     command:
       - --config=/root/config/prometheus.yml
       - --use-logs-new-schema=true
@@ -143,7 +143,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.111.37
+    image: signoz/signoz-otel-collector:v0.111.38
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -167,7 +167,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.111.37
+    image: signoz/signoz-schema-migrator:v0.111.38
     deploy:
       restart_policy:
         condition: on-failure

@@ -177,7 +177,7 @@ services:
       # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.77.0}
+    image: signoz/signoz:${VERSION:-v0.78.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -212,7 +212,7 @@ services:
   # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -238,7 +238,7 @@ services:
         condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -249,7 +249,7 @@ services:
        condition: service_healthy
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-async
     command:
       - async

@@ -110,7 +110,7 @@ services:
       # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.77.0}
+    image: signoz/signoz:${VERSION:-v0.78.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -146,7 +146,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -168,7 +168,7 @@ services:
        condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -180,7 +180,7 @@ services:
     restart: on-failure
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-async
     command:
       - async

@@ -110,7 +110,7 @@ services:
       # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.77.0}
+    image: signoz/signoz:${VERSION:-v0.78.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -144,7 +144,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -166,7 +166,7 @@ services:
        condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -178,7 +178,7 @@ services:
     restart: on-failure
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-async
     command:
       - async
```
```diff
@@ -18,4 +18,4 @@ COPY frontend/build/ /etc/signoz/web/
 RUN chmod 755 /root /root/signoz

 ENTRYPOINT ["./signoz"]
-CMD ["-config", "/root/config/prometheus.yml"]
+CMD ["-config", "/root/config/prometheus.yml"]
```
ee/query-service/Dockerfile.multi-arch (new file, 22 lines)

```dockerfile
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"

FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG ARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*

COPY ./target/${OS}-${ARCH}/signoz /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]
```
```diff
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"net/url"
-	"strings"
 	"time"

 	"github.com/SigNoz/signoz/ee/query-service/constants"
@@ -162,12 +161,7 @@ func (m *modelDao) PrecheckLogin(ctx context.Context, email, sourceUrl string) (
 	// find domain from email
 	orgDomain, apierr := m.GetDomainByEmail(ctx, email)
 	if apierr != nil {
-		var emailDomain string
-		emailComponents := strings.Split(email, "@")
-		if len(emailComponents) > 0 {
-			emailDomain = emailComponents[1]
-		}
-		zap.L().Error("failed to get org domain from email", zap.String("emailDomain", emailDomain), zap.Error(apierr.ToError()))
+		zap.L().Error("failed to get org domain from email", zap.String("email", email), zap.Error(apierr.ToError()))
 		return resp, apierr
 	}
```
```diff
@@ -157,8 +157,6 @@ func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) {
 	}

 	switch planName {
-	case PlanNameTeams:
-		features = append(features, ProPlan...)
 	case PlanNameEnterprise:
 		features = append(features, EnterprisePlan...)
 	case PlanNameBasic:
```
```diff
@@ -74,21 +74,21 @@ func TestNewLicenseV3(t *testing.T) {
 		},
 		{
 			name: "Parse the entire license properly",
-			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
+			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"ENTERPRISE"},"valid_from": 1730899309,"valid_until": -1}`),
 			pass: true,
 			expected: &LicenseV3{
 				ID:  "does-not-matter",
 				Key: "does-not-matter-key",
 				Data: map[string]interface{}{
 					"plan": map[string]interface{}{
-						"name": "TEAMS",
+						"name": "ENTERPRISE",
 					},
 					"category":    "FREE",
 					"status":      "ACTIVE",
 					"valid_from":  float64(1730899309),
 					"valid_until": float64(-1),
 				},
-				PlanName:   PlanNameTeams,
+				PlanName:   PlanNameEnterprise,
 				ValidFrom:  1730899309,
 				ValidUntil: -1,
 				Status:     "ACTIVE",
@@ -98,14 +98,14 @@ func TestNewLicenseV3(t *testing.T) {
 		},
 		{
 			name: "Fallback to basic plan if license status is invalid",
-			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INVALID","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
+			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INVALID","plan":{"name":"ENTERPRISE"},"valid_from": 1730899309,"valid_until": -1}`),
 			pass: true,
 			expected: &LicenseV3{
 				ID:  "does-not-matter",
 				Key: "does-not-matter-key",
 				Data: map[string]interface{}{
 					"plan": map[string]interface{}{
-						"name": "TEAMS",
+						"name": "ENTERPRISE",
 					},
 					"category": "FREE",
 					"status":   "INVALID",
@@ -122,21 +122,21 @@ func TestNewLicenseV3(t *testing.T) {
 		},
 		{
 			name: "fallback states for validFrom and validUntil",
-			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from":1234.456,"valid_until":5678.567}`),
+			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"ENTERPRISE"},"valid_from":1234.456,"valid_until":5678.567}`),
 			pass: true,
 			expected: &LicenseV3{
 				ID:  "does-not-matter",
 				Key: "does-not-matter-key",
 				Data: map[string]interface{}{
 					"plan": map[string]interface{}{
-						"name": "TEAMS",
+						"name": "ENTERPRISE",
 					},
 					"valid_from":  1234.456,
 					"valid_until": 5678.567,
 					"category":    "FREE",
 					"status":      "ACTIVE",
 				},
-				PlanName:   PlanNameTeams,
+				PlanName:   PlanNameEnterprise,
 				ValidFrom:  1234,
 				ValidUntil: 5678,
 				Status:     "ACTIVE",
```
```diff
@@ -1,30 +1,26 @@
 package model

 import (
-	"github.com/SigNoz/signoz/pkg/query-service/constants"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
 )

 const SSO = "SSO"
 const Basic = "BASIC_PLAN"
-const Pro = "PRO_PLAN"
 const Enterprise = "ENTERPRISE_PLAN"

 var (
 	PlanNameEnterprise = "ENTERPRISE"
-	PlanNameTeams      = "TEAMS"
 	PlanNameBasic      = "BASIC"
 )

 var (
-	MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameTeams: Pro, PlanNameEnterprise: Enterprise}
+	MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameEnterprise: Enterprise}
 )

 var (
 	LicenseStatusInvalid = "INVALID"
 )

 const DisableUpsell = "DISABLE_UPSELL"
 const Onboarding = "ONBOARDING"
 const ChatSupport = "CHAT_SUPPORT"
 const Gateway = "GATEWAY"
@@ -38,83 +34,6 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       basemodel.OSS,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       DisableUpsell,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.CustomMetricsFunction,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderPanels,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderAlerts,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelSlack,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelWebhook,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelPagerduty,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelOpsgenie,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelEmail,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelMsTeams,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 	basemodel.Feature{
 		Name:       basemodel.UseSpanMetrics,
 		Active:     false,
@@ -143,135 +62,6 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       basemodel.HostsInfraMonitoring,
-		Active:     constants.EnableHostsInfraMonitoring(),
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.TraceFunnels,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-}
-
-var ProPlan = basemodel.FeatureSet{
-	basemodel.Feature{
-		Name:       SSO,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.OSS,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.CustomMetricsFunction,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderPanels,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderAlerts,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelSlack,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelWebhook,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelPagerduty,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelOpsgenie,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelEmail,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelMsTeams,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.UseSpanMetrics,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       Gateway,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       PremiumSupport,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AnomalyDetection,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.HostsInfraMonitoring,
-		Active:     constants.EnableHostsInfraMonitoring(),
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 	basemodel.Feature{
 		Name:       basemodel.TraceFunnels,
 		Active:     false,
@@ -289,76 +79,6 @@ var EnterprisePlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       basemodel.OSS,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.CustomMetricsFunction,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderPanels,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderAlerts,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelSlack,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelWebhook,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelPagerduty,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelOpsgenie,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelEmail,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelMsTeams,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 	basemodel.Feature{
 		Name:       basemodel.UseSpanMetrics,
 		Active:     false,
@@ -401,13 +121,6 @@ var EnterprisePlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       basemodel.HostsInfraMonitoring,
-		Active:     constants.EnableHostsInfraMonitoring(),
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 	basemodel.Feature{
 		Name:       basemodel.TraceFunnels,
 		Active:     false,
```
```diff
@@ -11,9 +11,12 @@ const logEvent = async (
 	rateLimited?: boolean,
 ): Promise<SuccessResponse<EventSuccessPayloadProps> | ErrorResponse> => {
 	try {
+		// add tenant_url to attributes
+		const { hostname } = window.location;
+		const updatedAttributes = { ...attributes, tenant_url: hostname };
 		const response = await axios.post('/event', {
 			eventName,
-			attributes,
+			attributes: updatedAttributes,
 			eventType: eventType || 'track',
 			rateLimited: rateLimited || false, // TODO: Update this once we have a proper way to handle rate limiting
 		});
```
```diff
@@ -1,27 +1,12 @@
 // keep this consistent with backend constants.go
 export enum FeatureKeys {
 	SSO = 'SSO',
-	ENTERPRISE_PLAN = 'ENTERPRISE_PLAN',
-	BASIC_PLAN = 'BASIC_PLAN',
-	ALERT_CHANNEL_SLACK = 'ALERT_CHANNEL_SLACK',
-	ALERT_CHANNEL_WEBHOOK = 'ALERT_CHANNEL_WEBHOOK',
-	ALERT_CHANNEL_PAGERDUTY = 'ALERT_CHANNEL_PAGERDUTY',
-	ALERT_CHANNEL_OPSGENIE = 'ALERT_CHANNEL_OPSGENIE',
-	ALERT_CHANNEL_MSTEAMS = 'ALERT_CHANNEL_MSTEAMS',
-	CUSTOM_METRICS_FUNCTION = 'CUSTOM_METRICS_FUNCTION',
-	QUERY_BUILDER_PANELS = 'QUERY_BUILDER_PANELS',
-	QUERY_BUILDER_ALERTS = 'QUERY_BUILDER_ALERTS',
-	DISABLE_UPSELL = 'DISABLE_UPSELL',
 	USE_SPAN_METRICS = 'USE_SPAN_METRICS',
 	OSS = 'OSS',
 	ONBOARDING = 'ONBOARDING',
 	CHAT_SUPPORT = 'CHAT_SUPPORT',
 	GATEWAY = 'GATEWAY',
 	PREMIUM_SUPPORT = 'PREMIUM_SUPPORT',
 	QUERY_BUILDER_SEARCH_V2 = 'QUERY_BUILDER_SEARCH_V2',
 	ANOMALY_DETECTION = 'ANOMALY_DETECTION',
 	AWS_INTEGRATION = 'AWS_INTEGRATION',
 	ONBOARDING_V3 = 'ONBOARDING_V3',
 	THIRD_PARTY_API = 'THIRD_PARTY_API',
 	TRACE_FUNNELS = 'TRACE_FUNNELS',
 }
```
```diff
@@ -1,6 +1,5 @@
 import { Form, FormInstance, Input, Select, Switch, Typography } from 'antd';
 import { Store } from 'antd/lib/form/interface';
-import { FeatureKeys } from 'constants/features';
 import ROUTES from 'constants/routes';
 import {
 	ChannelType,
@@ -11,11 +10,8 @@ import {
 	WebhookChannel,
 } from 'container/CreateAlertChannels/config';
 import history from 'lib/history';
-import { useAppContext } from 'providers/App/App';
 import { Dispatch, ReactElement, SetStateAction } from 'react';
 import { useTranslation } from 'react-i18next';
-import { FeatureFlagProps } from 'types/api/features/getFeaturesFlags';
-import { isFeatureKeys } from 'utils/app';

 import EmailSettings from './Settings/Email';
 import MsTeamsSettings from './Settings/MsTeams';
@@ -39,17 +35,6 @@ function FormAlertChannels({
 	editing = false,
 }: FormAlertChannelsProps): JSX.Element {
 	const { t } = useTranslation('channels');
-	const { featureFlags } = useAppContext();
-
-	const feature = `ALERT_CHANNEL_${type.toUpperCase()}`;
-
-	const featureKey = isFeatureKeys(feature)
-		? feature
-		: FeatureKeys.ALERT_CHANNEL_SLACK;
-
-	const hasFeature = featureFlags?.find(
-		(flag: FeatureFlagProps) => flag.name === featureKey,
-	);

 	const renderSettings = (): ReactElement | null => {
 		switch (type) {
@@ -146,7 +131,7 @@ function FormAlertChannels({

 			<Form.Item>
 				<Button
-					disabled={savingState || !hasFeature}
+					disabled={savingState}
 					loading={savingState}
 					type="primary"
 					onClick={(): void => onSaveHandler(type)}
@@ -154,7 +139,7 @@ function FormAlertChannels({
 				{t('button_save_channel')}
 			</Button>
 			<Button
-				disabled={testingState || !hasFeature}
+				disabled={testingState}
 				loading={testingState}
 				onClick={(): void => onTestHandler(type)}
 			>
```
```diff
@@ -467,10 +467,6 @@ function FormAlertRules({
 		panelType,
 	]);

-	const isAlertAvailable =
-		!featureFlags?.find((flag) => flag.name === FeatureKeys.QUERY_BUILDER_ALERTS)
-			?.active || false;
-
 	const saveRule = useCallback(async () => {
 		if (!isFormValid()) {
 			return;
@@ -688,11 +684,6 @@ function FormAlertRules({

 	const isAlertNameMissing = !formInstance.getFieldValue('alert');

-	const isAlertAvailableToSave =
-		isAlertAvailable &&
-		currentQuery.queryType === EQueryType.QUERY_BUILDER &&
-		alertType !== AlertTypes.METRICS_BASED_ALERT;
-
 	const onUnitChangeHandler = (value: string): void => {
 		setYAxisUnit(value);
 		// reset target unit
@@ -865,7 +856,6 @@ function FormAlertRules({
 					icon={<SaveOutlined />}
 					disabled={
 						isAlertNameMissing ||
-						isAlertAvailableToSave ||
 						!isChannelConfigurationValid ||
 						queryStatus === 'error'
 					}
```
```diff
@@ -5,7 +5,6 @@ import { WarningOutlined } from '@ant-design/icons';
 import { Button, Flex, Modal, Space, Typography } from 'antd';
 import logEvent from 'api/common/logEvent';
 import OverlayScrollbar from 'components/OverlayScrollbar/OverlayScrollbar';
-import { FeatureKeys } from 'constants/features';
 import { QueryParams } from 'constants/query';
 import {
 	initialQueriesMap,
@@ -27,7 +26,6 @@ import { GetQueryResultsProps } from 'lib/dashboard/getQueryResults';
 import { cloneDeep, defaultTo, isEmpty, isUndefined } from 'lodash-es';
 import { Check, X } from 'lucide-react';
 import { DashboardWidgetPageParams } from 'pages/DashboardWidget';
-import { useAppContext } from 'providers/App/App';
 import { useDashboard } from 'providers/Dashboard/Dashboard';
 import {
 	getNextWidgets,
@@ -79,8 +77,6 @@ function NewWidget({ selectedGraph }: NewWidgetProps): JSX.Element {

 	const { t } = useTranslation(['dashboard']);

-	const { featureFlags } = useAppContext();
-
 	const { registerShortcut, deregisterShortcut } = useKeyboardHotkeys();

 	const {
@@ -566,12 +562,7 @@ function NewWidget({ selectedGraph }: NewWidgetProps): JSX.Element {
 		// eslint-disable-next-line react-hooks/exhaustive-deps
 	}, []);

-	const isQueryBuilderActive =
-		!featureFlags?.find((flag) => flag.name === FeatureKeys.QUERY_BUILDER_PANELS)
-			?.active || false;
-
 	const isNewTraceLogsAvailable =
-		isQueryBuilderActive &&
 		currentQuery.queryType === EQueryType.QUERY_BUILDER &&
 		currentQuery.builder.queryData.find(
 			(query) => query.dataSource !== DataSource.METRICS,
```
```diff
@@ -13,11 +13,7 @@ function OrganizationSettings(): JSX.Element {
 	const isNotSSO =
 		!featureFlags?.find((flag) => flag.name === FeatureKeys.SSO)?.active || false;

-	const isNoUpSell =
-		!featureFlags?.find((flag) => flag.name === FeatureKeys.DISABLE_UPSELL)
-			?.active || false;
-
-	const isAuthDomain = !isNoUpSell || (isNoUpSell && !isNotSSO);
+	const isAuthDomain = !isNotSSO;

 	if (!org) {
 		return <div />;
```
```diff
@@ -284,16 +284,6 @@ function SideNav(): JSX.Element {
 		manageLicenseMenuItem,
 	];

-	const isApiMonitoringEnabled = featureFlags?.find(
-		(flag) => flag.name === FeatureKeys.THIRD_PARTY_API,
-	)?.active;
-
-	if (!isApiMonitoringEnabled) {
-		updatedMenuItems = updatedMenuItems.filter(
-			(item) => item.key !== ROUTES.API_MONITORING,
-		);
-	}
-
 	if (isCloudUser || isEnterpriseSelfHostedUser) {
 		const isOnboardingEnabled =
 			featureFlags?.find((feature) => feature.name === FeatureKeys.ONBOARDING)
```
```diff
@@ -186,76 +186,6 @@ export function getAppContextMock(
 			usage_limit: -1,
 			route: '',
 		},
-		{
-			name: FeatureKeys.OSS,
-			active: false,
-			usage: 0,
-			usage_limit: -1,
-			route: '',
-		},
-		{
-			name: FeatureKeys.DISABLE_UPSELL,
-			active: false,
-			usage: 0,
-			usage_limit: -1,
-			route: '',
-		},
-		{
-			name: FeatureKeys.CUSTOM_METRICS_FUNCTION,
-			active: true,
-			usage: 0,
-			usage_limit: -1,
-			route: '',
-		},
-		{
-			name: FeatureKeys.QUERY_BUILDER_PANELS,
-			active: true,
-			usage: 0,
-			usage_limit: -1,
-			route: '',
-		},
-		{
-			name: FeatureKeys.QUERY_BUILDER_ALERTS,
-			active: true,
-			usage: 0,
-			usage_limit: -1,
-			route: '',
-		},
-		{
-			name: FeatureKeys.ALERT_CHANNEL_SLACK,
-			active: true,
-			usage: 0,
-			usage_limit: -1,
-			route: '',
-		},
-		{
-			name: FeatureKeys.ALERT_CHANNEL_WEBHOOK,
-			active: true,
-			usage: 0,
-			usage_limit: -1,
-			route: '',
-		},
-		{
-			name: FeatureKeys.ALERT_CHANNEL_PAGERDUTY,
-			active: true,
-			usage: 0,
-			usage_limit: -1,
-			route: '',
-		},
-		{
-			name: FeatureKeys.ALERT_CHANNEL_OPSGENIE,
-			active: true,
-			usage: 0,
-			usage_limit: -1,
-			route: '',
-		},
-		{
-			name: FeatureKeys.ALERT_CHANNEL_MSTEAMS,
-			active: true,
-			usage: 0,
-			usage_limit: -1,
-			route: '',
-		},
 		{
 			name: FeatureKeys.USE_SPAN_METRICS,
 			active: false,
```
go.mod (+3)

```diff
@@ -28,6 +28,7 @@ require (
 	github.com/gorilla/mux v1.8.1
 	github.com/gorilla/websocket v1.5.0
 	github.com/gosimple/slug v1.10.0
+	github.com/huandu/go-sqlbuilder v1.35.0
 	github.com/jackc/pgx/v5 v5.7.2
 	github.com/jmoiron/sqlx v1.3.4
 	github.com/json-iterator/go v1.1.12
@@ -91,6 +92,7 @@ require (
 	github.com/ClickHouse/ch-go v0.61.5 // indirect
 	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
 	github.com/andybalholm/brotli v1.1.1 // indirect
+	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/aws/aws-sdk-go v1.55.5 // indirect
@@ -150,6 +152,7 @@ require (
 	github.com/hashicorp/golang-lru v1.0.2 // indirect
 	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
 	github.com/hashicorp/memberlist v0.5.1 // indirect
+	github.com/huandu/xstrings v1.4.0 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jackc/pgpassfile v1.0.0 // indirect
 	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
```
go.sum (+8)

```diff
@@ -113,6 +113,8 @@ github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4
 github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
 github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
+github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
 github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI=
 github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -537,6 +539,12 @@ github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOF
 github.com/hjson/hjson-go/v4 v4.0.0 h1:wlm6IYYqHjOdXH1gHev4VoXCaW20HdQAGCxdOEEg2cs=
 github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/go-assert v1.1.6 h1:oaAfYxq9KNDi9qswn/6aE0EydfxSa+tWZC1KabNitYs=
+github.com/huandu/go-assert v1.1.6/go.mod h1:JuIfbmYG9ykwvuxoJ3V8TB5QP+3+ajIA54Y44TmkMxs=
+github.com/huandu/go-sqlbuilder v1.35.0 h1:ESvxFHN8vxCTudY1Vq63zYpU5yJBESn19sf6k4v2T5Q=
+github.com/huandu/go-sqlbuilder v1.35.0/go.mod h1:mS0GAtrtW+XL6nM2/gXHRJax2RwSW1TraavWDFAc1JA=
+github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
+github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
```
221
grammar/FilterQuery.g4
Normal file
@@ -0,0 +1,221 @@
|
||||
grammar FilterQuery;
|
||||
|
||||
/*
|
||||
* Parser Rules
|
||||
*/
|
||||
|
||||
query
|
||||
: expression
|
||||
EOF
|
||||
;
|
||||
|
||||
// Expression with standard boolean precedence:
|
||||
// - parentheses > NOT > AND > OR
|
||||
// - consecutive expressions with no AND/OR => implicit AND
|
||||
expression
    : orExpression
    ;

// OR expressions
orExpression
    : andExpression ( OR andExpression )*
    ;

// AND expressions + optional chaining with implicit AND if no OR is present
andExpression
    : unaryExpression ( AND unaryExpression | unaryExpression )*
    ;

// A unary expression handles optional NOT
unaryExpression
    : NOT? primary
    ;

// Primary constructs: grouped expressions, a comparison (key op value),
// a function call, or a full-text string
primary
    : LPAREN orExpression RPAREN
    | comparison
    | functionCall
    | fullText
    | key
    ;

/*
 * Comparison-like filters
 *
 * Includes all operators: =, !=, <>, <, <=, >, >=, [NOT] LIKE, [NOT] ILIKE,
 * [NOT] BETWEEN, [NOT] IN, [NOT] EXISTS, [NOT] REGEXP, [NOT] CONTAINS, etc.
 */
comparison
    : key EQUALS value
    | key (NOT_EQUALS | NEQ) value
    | key LT value
    | key LE value
    | key GT value
    | key GE value

    | key (LIKE | ILIKE) value
    | key (NOT_LIKE | NOT_ILIKE) value

    | key BETWEEN value AND value
    | key NOT BETWEEN value AND value

    | key inClause
    | key notInClause

    | key EXISTS
    | key NOT EXISTS

    | key REGEXP value
    | key NOT REGEXP value

    | key CONTAINS value
    | key NOT CONTAINS value
    ;

// in(...) or in[...]
inClause
    : IN LPAREN valueList RPAREN
    | IN LBRACK valueList RBRACK
    ;

notInClause
    : NOT IN LPAREN valueList RPAREN
    | NOT IN LBRACK valueList RBRACK
    ;

// List of values for in(...) or in[...]
valueList
    : value ( COMMA value )*
    ;

// Full-text search: a standalone quoted string is allowed as a "primary"
// e.g. `"Waiting for response" http.status_code=200`
fullText
    : QUOTED_TEXT
    | FREETEXT
    ;

/*
 * Function calls like:
 *   has(payload.user_ids, 123)
 *   hasAny(payload.user_ids, [123, 456])
 *   ...
 */
functionCall
    : (HAS | HASANY | HASALL | HASNONE) LPAREN functionParamList RPAREN
    ;

// Function parameters can be keys, single scalar values, or arrays
functionParamList
    : functionParam ( COMMA functionParam )*
    ;

functionParam
    : key
    | value
    | array
    ;

// An array: [ item1, item2, item3 ]
array
    : LBRACK valueList RBRACK
    ;

/*
 * A 'value' can be a string literal (double or single-quoted),
 * a numeric literal, boolean, or a "bare" token as needed.
 */
value
    : QUOTED_TEXT
    | NUMBER
    | BOOL
    | KEY
    ;

/*
 * A key can include letters, digits, underscores, dots, and brackets.
 * E.g. service.name, query_log.query_duration_ms, proto.user_objects[].name
 */
key
    : KEY
    ;


/*
 * Lexer Rules
 */

// Common punctuation / symbols
LPAREN : '(' ;
RPAREN : ')' ;
LBRACK : '[' ;
RBRACK : ']' ;
COMMA  : ',' ;

EQUALS     : '=' | '==' ;
NOT_EQUALS : '!=' ;
NEQ        : '<>' ; // alternate not-equals operator
LT         : '<' ;
LE         : '<=' ;
GT         : '>' ;
GE         : '>=' ;

// Keyword operators, matched case-insensitively
LIKE      : [Ll][Ii][Kk][Ee] ;
NOT_LIKE  : [Nn][Oo][Tt] [ \t]+ [Ll][Ii][Kk][Ee] ;
ILIKE     : [Ii][Ll][Ii][Kk][Ee] ;
NOT_ILIKE : [Nn][Oo][Tt] [ \t]+ [Ii][Ll][Ii][Kk][Ee] ;
BETWEEN   : [Bb][Ee][Tt][Ww][Ee][Ee][Nn] ;
EXISTS    : [Ee][Xx][Ii][Ss][Tt][Ss]? ;
REGEXP    : [Rr][Ee][Gg][Ee][Xx][Pp] ;
CONTAINS  : [Cc][Oo][Nn][Tt][Aa][Ii][Nn][Ss]? ;
IN        : [Ii][Nn] ;

// Boolean logic
NOT : [Nn][Oo][Tt] ;
AND : [Aa][Nn][Dd] ;
OR  : [Oo][Rr] ;

// For easy referencing in function calls
HAS     : [Hh][Aa][Ss] ;
HASANY  : [Hh][Aa][Ss][Aa][Nn][Yy] ;
HASALL  : [Hh][Aa][Ss][Aa][Ll][Ll] ;
HASNONE : [Hh][Aa][Ss][Nn][Oo][Nn][Ee] ;

// Boolean constants
BOOL
    : [Tt][Rr][Uu][Ee]
    | [Ff][Aa][Ll][Ss][Ee]
    ;

// Numbers (integer or float). Adjust as needed for your domain.
NUMBER
    : DIGIT+ ( '.' DIGIT+ )?
    ;

// Double/single-quoted text, capturing full text search strings, values, etc.
QUOTED_TEXT
    : ( '"' ( ~["\\] | '\\' . )* '"'    // double-quoted
      | '\'' ( ~['\\] | '\\' . )* '\''  // single-quoted
      )
    ;

// Keys can have letters, digits, underscores, dots, and bracket pairs
// e.g. service.name, service.namespace, db.queries[].query_duration
KEY
    : [a-zA-Z0-9_] [a-zA-Z0-9_.[\]]*
    ;

// Ignore whitespace
WS
    : [ \t\r\n]+ -> skip
    ;

// Digits used by NUMBER
fragment DIGIT
    : [0-9]
    ;

FREETEXT : (~[ \t\r\n=()'"<>![\]])+ ;
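The generated Go code in the files below is driven in the usual ANTLR fashion. A minimal sketch, assuming the package is importable at the path shown (the directory suggests it, but this is an assumption) and that filterquery_parser.go — whose body is collapsed in this view — exposes the standard generated NewFilterQueryParser constructor and a Query() method for the grammar's entry rule:

package main

import (
    "fmt"

    "github.com/antlr4-go/antlr/v4"

    parser "github.com/SigNoz/signoz/pkg/parser/grammar" // assumed import path
)

func main() {
    // Build the lexer -> token stream -> parser pipeline over a sample query.
    input := antlr.NewInputStream(`service.name = "api" AND http.status_code >= 500`)
    lexer := parser.NewFilterQueryLexer(input)
    tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
    p := parser.NewFilterQueryParser(tokens)

    // Query() corresponds to the grammar's `query : expression EOF ;` rule.
    tree := p.Query()
    fmt.Println(tree.ToStringTree(nil, p))
}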
96
pkg/parser/grammar/FilterQuery.interp
Normal file
45
pkg/parser/grammar/FilterQuery.tokens
Normal file
@@ -0,0 +1,45 @@
LPAREN=1
RPAREN=2
LBRACK=3
RBRACK=4
COMMA=5
EQUALS=6
NOT_EQUALS=7
NEQ=8
LT=9
LE=10
GT=11
GE=12
LIKE=13
NOT_LIKE=14
ILIKE=15
NOT_ILIKE=16
BETWEEN=17
EXISTS=18
REGEXP=19
CONTAINS=20
IN=21
NOT=22
AND=23
OR=24
HAS=25
HASANY=26
HASALL=27
HASNONE=28
BOOL=29
NUMBER=30
QUOTED_TEXT=31
KEY=32
WS=33
FREETEXT=34
'('=1
')'=2
'['=3
']'=4
','=5
'!='=7
'<>'=8
'<'=9
'<='=10
'>'=11
'>='=12
120
pkg/parser/grammar/FilterQueryLexer.interp
Normal file
45
pkg/parser/grammar/FilterQueryLexer.tokens
Normal file
@@ -0,0 +1,45 @@
LPAREN=1
RPAREN=2
LBRACK=3
RBRACK=4
COMMA=5
EQUALS=6
NOT_EQUALS=7
NEQ=8
LT=9
LE=10
GT=11
GE=12
LIKE=13
NOT_LIKE=14
ILIKE=15
NOT_ILIKE=16
BETWEEN=17
EXISTS=18
REGEXP=19
CONTAINS=20
IN=21
NOT=22
AND=23
OR=24
HAS=25
HASANY=26
HASALL=27
HASNONE=28
BOOL=29
NUMBER=30
QUOTED_TEXT=31
KEY=32
WS=33
FREETEXT=34
'('=1
')'=2
'['=3
']'=4
','=5
'!='=7
'<>'=8
'<'=9
'<='=10
'>'=11
'>='=12
124
pkg/parser/grammar/filterquery_base_listener.go
Normal file
@@ -0,0 +1,124 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

// BaseFilterQueryListener is a complete listener for a parse tree produced by FilterQueryParser.
type BaseFilterQueryListener struct{}

var _ FilterQueryListener = &BaseFilterQueryListener{}

// VisitTerminal is called when a terminal node is visited.
func (s *BaseFilterQueryListener) VisitTerminal(node antlr.TerminalNode) {}

// VisitErrorNode is called when an error node is visited.
func (s *BaseFilterQueryListener) VisitErrorNode(node antlr.ErrorNode) {}

// EnterEveryRule is called when any rule is entered.
func (s *BaseFilterQueryListener) EnterEveryRule(ctx antlr.ParserRuleContext) {}

// ExitEveryRule is called when any rule is exited.
func (s *BaseFilterQueryListener) ExitEveryRule(ctx antlr.ParserRuleContext) {}

// EnterQuery is called when production query is entered.
func (s *BaseFilterQueryListener) EnterQuery(ctx *QueryContext) {}

// ExitQuery is called when production query is exited.
func (s *BaseFilterQueryListener) ExitQuery(ctx *QueryContext) {}

// EnterExpression is called when production expression is entered.
func (s *BaseFilterQueryListener) EnterExpression(ctx *ExpressionContext) {}

// ExitExpression is called when production expression is exited.
func (s *BaseFilterQueryListener) ExitExpression(ctx *ExpressionContext) {}

// EnterOrExpression is called when production orExpression is entered.
func (s *BaseFilterQueryListener) EnterOrExpression(ctx *OrExpressionContext) {}

// ExitOrExpression is called when production orExpression is exited.
func (s *BaseFilterQueryListener) ExitOrExpression(ctx *OrExpressionContext) {}

// EnterAndExpression is called when production andExpression is entered.
func (s *BaseFilterQueryListener) EnterAndExpression(ctx *AndExpressionContext) {}

// ExitAndExpression is called when production andExpression is exited.
func (s *BaseFilterQueryListener) ExitAndExpression(ctx *AndExpressionContext) {}

// EnterUnaryExpression is called when production unaryExpression is entered.
func (s *BaseFilterQueryListener) EnterUnaryExpression(ctx *UnaryExpressionContext) {}

// ExitUnaryExpression is called when production unaryExpression is exited.
func (s *BaseFilterQueryListener) ExitUnaryExpression(ctx *UnaryExpressionContext) {}

// EnterPrimary is called when production primary is entered.
func (s *BaseFilterQueryListener) EnterPrimary(ctx *PrimaryContext) {}

// ExitPrimary is called when production primary is exited.
func (s *BaseFilterQueryListener) ExitPrimary(ctx *PrimaryContext) {}

// EnterComparison is called when production comparison is entered.
func (s *BaseFilterQueryListener) EnterComparison(ctx *ComparisonContext) {}

// ExitComparison is called when production comparison is exited.
func (s *BaseFilterQueryListener) ExitComparison(ctx *ComparisonContext) {}

// EnterInClause is called when production inClause is entered.
func (s *BaseFilterQueryListener) EnterInClause(ctx *InClauseContext) {}

// ExitInClause is called when production inClause is exited.
func (s *BaseFilterQueryListener) ExitInClause(ctx *InClauseContext) {}

// EnterNotInClause is called when production notInClause is entered.
func (s *BaseFilterQueryListener) EnterNotInClause(ctx *NotInClauseContext) {}

// ExitNotInClause is called when production notInClause is exited.
func (s *BaseFilterQueryListener) ExitNotInClause(ctx *NotInClauseContext) {}

// EnterValueList is called when production valueList is entered.
func (s *BaseFilterQueryListener) EnterValueList(ctx *ValueListContext) {}

// ExitValueList is called when production valueList is exited.
func (s *BaseFilterQueryListener) ExitValueList(ctx *ValueListContext) {}

// EnterFullText is called when production fullText is entered.
func (s *BaseFilterQueryListener) EnterFullText(ctx *FullTextContext) {}

// ExitFullText is called when production fullText is exited.
func (s *BaseFilterQueryListener) ExitFullText(ctx *FullTextContext) {}

// EnterFunctionCall is called when production functionCall is entered.
func (s *BaseFilterQueryListener) EnterFunctionCall(ctx *FunctionCallContext) {}

// ExitFunctionCall is called when production functionCall is exited.
func (s *BaseFilterQueryListener) ExitFunctionCall(ctx *FunctionCallContext) {}

// EnterFunctionParamList is called when production functionParamList is entered.
func (s *BaseFilterQueryListener) EnterFunctionParamList(ctx *FunctionParamListContext) {}

// ExitFunctionParamList is called when production functionParamList is exited.
func (s *BaseFilterQueryListener) ExitFunctionParamList(ctx *FunctionParamListContext) {}

// EnterFunctionParam is called when production functionParam is entered.
func (s *BaseFilterQueryListener) EnterFunctionParam(ctx *FunctionParamContext) {}

// ExitFunctionParam is called when production functionParam is exited.
func (s *BaseFilterQueryListener) ExitFunctionParam(ctx *FunctionParamContext) {}

// EnterArray is called when production array is entered.
func (s *BaseFilterQueryListener) EnterArray(ctx *ArrayContext) {}

// ExitArray is called when production array is exited.
func (s *BaseFilterQueryListener) ExitArray(ctx *ArrayContext) {}

// EnterValue is called when production value is entered.
func (s *BaseFilterQueryListener) EnterValue(ctx *ValueContext) {}

// ExitValue is called when production value is exited.
func (s *BaseFilterQueryListener) ExitValue(ctx *ValueContext) {}

// EnterKey is called when production key is entered.
func (s *BaseFilterQueryListener) EnterKey(ctx *KeyContext) {}

// ExitKey is called when production key is exited.
func (s *BaseFilterQueryListener) ExitKey(ctx *KeyContext) {}
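A usage sketch for the base listener (hypothetical code, not part of this change): consumers embed the base type so they only override the callbacks they care about, then drive it with the runtime's default tree walker. Here, collecting every key referenced by a filter:

// keyCollector records every key mentioned in a parsed filter expression.
type keyCollector struct {
    parser.BaseFilterQueryListener
    keys []string
}

// EnterKey overrides the no-op base callback for the `key` rule.
func (k *keyCollector) EnterKey(ctx *parser.KeyContext) {
    k.keys = append(k.keys, ctx.GetText())
}

// Driving it over a tree produced by p.Query():
//   c := &keyCollector{}
//   antlr.ParseTreeWalkerDefault.Walk(c, tree)
//   fmt.Println(c.keys)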
77
pkg/parser/grammar/filterquery_base_visitor.go
Normal file
@@ -0,0 +1,77 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

type BaseFilterQueryVisitor struct {
    *antlr.BaseParseTreeVisitor
}

func (v *BaseFilterQueryVisitor) VisitQuery(ctx *QueryContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitExpression(ctx *ExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitOrExpression(ctx *OrExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitAndExpression(ctx *AndExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitUnaryExpression(ctx *UnaryExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitPrimary(ctx *PrimaryContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitComparison(ctx *ComparisonContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitInClause(ctx *InClauseContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitNotInClause(ctx *NotInClauseContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitValueList(ctx *ValueListContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFullText(ctx *FullTextContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFunctionCall(ctx *FunctionCallContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFunctionParamList(ctx *FunctionParamListContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFunctionParam(ctx *FunctionParamContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitArray(ctx *ArrayContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitValue(ctx *ValueContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitKey(ctx *KeyContext) interface{} {
    return v.VisitChildren(ctx)
}
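One caveat worth knowing when building on this base visitor: with the Go runtime, antlr.BaseParseTreeVisitor.VisitChildren does not dispatch back into the concrete visitor, so an embedder that wants full-tree traversal has to recurse explicitly inside each overridden VisitXxx method, or use the listener plus ParseTreeWalker API sketched above instead.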
271
pkg/parser/grammar/filterquery_lexer.go
Normal file
@@ -0,0 +1,271 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser

import (
    "fmt"
    "github.com/antlr4-go/antlr/v4"
    "sync"
    "unicode"
)

// Suppress unused import error
var _ = fmt.Printf
var _ = sync.Once{}
var _ = unicode.IsLetter

type FilterQueryLexer struct {
    *antlr.BaseLexer
    channelNames []string
    modeNames    []string
    // TODO: EOF string
}

var FilterQueryLexerLexerStaticData struct {
    once                   sync.Once
    serializedATN          []int32
    ChannelNames           []string
    ModeNames              []string
    LiteralNames           []string
    SymbolicNames          []string
    RuleNames              []string
    PredictionContextCache *antlr.PredictionContextCache
    atn                    *antlr.ATN
    decisionToDFA          []*antlr.DFA
}

func filterquerylexerLexerInit() {
    staticData := &FilterQueryLexerLexerStaticData
    staticData.ChannelNames = []string{
        "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
    }
    staticData.ModeNames = []string{
        "DEFAULT_MODE",
    }
    staticData.LiteralNames = []string{
        "", "'('", "')'", "'['", "']'", "','", "", "'!='", "'<>'", "'<'", "'<='",
        "'>'", "'>='",
    }
    staticData.SymbolicNames = []string{
        "", "LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
        "NEQ", "LT", "LE", "GT", "GE", "LIKE", "NOT_LIKE", "ILIKE", "NOT_ILIKE",
        "BETWEEN", "EXISTS", "REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR",
        "HAS", "HASANY", "HASALL", "HASNONE", "BOOL", "NUMBER", "QUOTED_TEXT",
        "KEY", "WS", "FREETEXT",
    }
    staticData.RuleNames = []string{
        "LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
        "NEQ", "LT", "LE", "GT", "GE", "LIKE", "NOT_LIKE", "ILIKE", "NOT_ILIKE",
        "BETWEEN", "EXISTS", "REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR",
        "HAS", "HASANY", "HASALL", "HASNONE", "BOOL", "NUMBER", "QUOTED_TEXT",
        "KEY", "WS", "DIGIT", "FREETEXT",
    }
    staticData.PredictionContextCache = antlr.NewPredictionContextCache()
    staticData.serializedATN = []int32{
        4, 0, 34, 280, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
        4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
        10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
        7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
        20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
        2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
        31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 1, 0, 1, 0, 1, 1,
        1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 3, 5, 85, 8,
        5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1,
        10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13,
        1, 13, 1, 13, 1, 13, 4, 13, 112, 8, 13, 11, 13, 12, 13, 113, 1, 13, 1,
        13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15,
        1, 15, 1, 15, 1, 15, 4, 15, 131, 8, 15, 11, 15, 12, 15, 132, 1, 15, 1,
        15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16,
        1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 3, 17, 155, 8,
        17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19,
        1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 172, 8, 19, 1, 20, 1, 20, 1,
        20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23,
        1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1,
        25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27,
        1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1,
        28, 1, 28, 1, 28, 1, 28, 1, 28, 3, 28, 223, 8, 28, 1, 29, 4, 29, 226, 8,
        29, 11, 29, 12, 29, 227, 1, 29, 1, 29, 4, 29, 232, 8, 29, 11, 29, 12, 29,
        233, 3, 29, 236, 8, 29, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 242, 8, 30,
        10, 30, 12, 30, 245, 9, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 252,
        8, 30, 10, 30, 12, 30, 255, 9, 30, 1, 30, 3, 30, 258, 8, 30, 1, 31, 1,
        31, 5, 31, 262, 8, 31, 10, 31, 12, 31, 265, 9, 31, 1, 32, 4, 32, 268, 8,
        32, 11, 32, 12, 32, 269, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 4, 34, 277,
        8, 34, 11, 34, 12, 34, 278, 0, 0, 35, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11,
        6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15,
        31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24,
        49, 25, 51, 26, 53, 27, 55, 28, 57, 29, 59, 30, 61, 31, 63, 32, 65, 33,
        67, 0, 69, 34, 1, 0, 29, 2, 0, 76, 76, 108, 108, 2, 0, 73, 73, 105, 105,
        2, 0, 75, 75, 107, 107, 2, 0, 69, 69, 101, 101, 2, 0, 78, 78, 110, 110,
        2, 0, 79, 79, 111, 111, 2, 0, 84, 84, 116, 116, 2, 0, 9, 9, 32, 32, 2,
        0, 66, 66, 98, 98, 2, 0, 87, 87, 119, 119, 2, 0, 88, 88, 120, 120, 2, 0,
        83, 83, 115, 115, 2, 0, 82, 82, 114, 114, 2, 0, 71, 71, 103, 103, 2, 0,
        80, 80, 112, 112, 2, 0, 67, 67, 99, 99, 2, 0, 65, 65, 97, 97, 2, 0, 68,
        68, 100, 100, 2, 0, 72, 72, 104, 104, 2, 0, 89, 89, 121, 121, 2, 0, 85,
        85, 117, 117, 2, 0, 70, 70, 102, 102, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39,
        92, 92, 4, 0, 48, 57, 65, 90, 95, 95, 97, 122, 6, 0, 46, 46, 48, 57, 65,
        91, 93, 93, 95, 95, 97, 122, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57,
        7, 0, 9, 10, 13, 13, 32, 34, 39, 41, 60, 62, 91, 91, 93, 93, 295, 0, 1,
        1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9,
        1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0,
        17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0,
        0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0,
        0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0,
        0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1,
        0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55,
        1, 0, 0, 0, 0, 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0,
        63, 1, 0, 0, 0, 0, 65, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 1, 71, 1, 0, 0, 0,
        3, 73, 1, 0, 0, 0, 5, 75, 1, 0, 0, 0, 7, 77, 1, 0, 0, 0, 9, 79, 1, 0, 0,
        0, 11, 84, 1, 0, 0, 0, 13, 86, 1, 0, 0, 0, 15, 89, 1, 0, 0, 0, 17, 92,
        1, 0, 0, 0, 19, 94, 1, 0, 0, 0, 21, 97, 1, 0, 0, 0, 23, 99, 1, 0, 0, 0,
        25, 102, 1, 0, 0, 0, 27, 107, 1, 0, 0, 0, 29, 120, 1, 0, 0, 0, 31, 126,
        1, 0, 0, 0, 33, 140, 1, 0, 0, 0, 35, 148, 1, 0, 0, 0, 37, 156, 1, 0, 0,
        0, 39, 163, 1, 0, 0, 0, 41, 173, 1, 0, 0, 0, 43, 176, 1, 0, 0, 0, 45, 180,
        1, 0, 0, 0, 47, 184, 1, 0, 0, 0, 49, 187, 1, 0, 0, 0, 51, 191, 1, 0, 0,
        0, 53, 198, 1, 0, 0, 0, 55, 205, 1, 0, 0, 0, 57, 222, 1, 0, 0, 0, 59, 225,
        1, 0, 0, 0, 61, 257, 1, 0, 0, 0, 63, 259, 1, 0, 0, 0, 65, 267, 1, 0, 0,
        0, 67, 273, 1, 0, 0, 0, 69, 276, 1, 0, 0, 0, 71, 72, 5, 40, 0, 0, 72, 2,
        1, 0, 0, 0, 73, 74, 5, 41, 0, 0, 74, 4, 1, 0, 0, 0, 75, 76, 5, 91, 0, 0,
        76, 6, 1, 0, 0, 0, 77, 78, 5, 93, 0, 0, 78, 8, 1, 0, 0, 0, 79, 80, 5, 44,
        0, 0, 80, 10, 1, 0, 0, 0, 81, 85, 5, 61, 0, 0, 82, 83, 5, 61, 0, 0, 83,
        85, 5, 61, 0, 0, 84, 81, 1, 0, 0, 0, 84, 82, 1, 0, 0, 0, 85, 12, 1, 0,
        0, 0, 86, 87, 5, 33, 0, 0, 87, 88, 5, 61, 0, 0, 88, 14, 1, 0, 0, 0, 89,
        90, 5, 60, 0, 0, 90, 91, 5, 62, 0, 0, 91, 16, 1, 0, 0, 0, 92, 93, 5, 60,
        0, 0, 93, 18, 1, 0, 0, 0, 94, 95, 5, 60, 0, 0, 95, 96, 5, 61, 0, 0, 96,
        20, 1, 0, 0, 0, 97, 98, 5, 62, 0, 0, 98, 22, 1, 0, 0, 0, 99, 100, 5, 62,
        0, 0, 100, 101, 5, 61, 0, 0, 101, 24, 1, 0, 0, 0, 102, 103, 7, 0, 0, 0,
        103, 104, 7, 1, 0, 0, 104, 105, 7, 2, 0, 0, 105, 106, 7, 3, 0, 0, 106,
        26, 1, 0, 0, 0, 107, 108, 7, 4, 0, 0, 108, 109, 7, 5, 0, 0, 109, 111, 7,
        6, 0, 0, 110, 112, 7, 7, 0, 0, 111, 110, 1, 0, 0, 0, 112, 113, 1, 0, 0,
        0, 113, 111, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115,
        116, 7, 0, 0, 0, 116, 117, 7, 1, 0, 0, 117, 118, 7, 2, 0, 0, 118, 119,
        7, 3, 0, 0, 119, 28, 1, 0, 0, 0, 120, 121, 7, 1, 0, 0, 121, 122, 7, 0,
        0, 0, 122, 123, 7, 1, 0, 0, 123, 124, 7, 2, 0, 0, 124, 125, 7, 3, 0, 0,
        125, 30, 1, 0, 0, 0, 126, 127, 7, 4, 0, 0, 127, 128, 7, 5, 0, 0, 128, 130,
        7, 6, 0, 0, 129, 131, 7, 7, 0, 0, 130, 129, 1, 0, 0, 0, 131, 132, 1, 0,
        0, 0, 132, 130, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 134, 1, 0, 0, 0,
        134, 135, 7, 1, 0, 0, 135, 136, 7, 0, 0, 0, 136, 137, 7, 1, 0, 0, 137,
        138, 7, 2, 0, 0, 138, 139, 7, 3, 0, 0, 139, 32, 1, 0, 0, 0, 140, 141, 7,
        8, 0, 0, 141, 142, 7, 3, 0, 0, 142, 143, 7, 6, 0, 0, 143, 144, 7, 9, 0,
        0, 144, 145, 7, 3, 0, 0, 145, 146, 7, 3, 0, 0, 146, 147, 7, 4, 0, 0, 147,
        34, 1, 0, 0, 0, 148, 149, 7, 3, 0, 0, 149, 150, 7, 10, 0, 0, 150, 151,
        7, 1, 0, 0, 151, 152, 7, 11, 0, 0, 152, 154, 7, 6, 0, 0, 153, 155, 7, 11,
        0, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 36, 1, 0, 0, 0,
        156, 157, 7, 12, 0, 0, 157, 158, 7, 3, 0, 0, 158, 159, 7, 13, 0, 0, 159,
        160, 7, 3, 0, 0, 160, 161, 7, 10, 0, 0, 161, 162, 7, 14, 0, 0, 162, 38,
        1, 0, 0, 0, 163, 164, 7, 15, 0, 0, 164, 165, 7, 5, 0, 0, 165, 166, 7, 4,
        0, 0, 166, 167, 7, 6, 0, 0, 167, 168, 7, 16, 0, 0, 168, 169, 7, 1, 0, 0,
        169, 171, 7, 4, 0, 0, 170, 172, 7, 11, 0, 0, 171, 170, 1, 0, 0, 0, 171,
        172, 1, 0, 0, 0, 172, 40, 1, 0, 0, 0, 173, 174, 7, 1, 0, 0, 174, 175, 7,
        4, 0, 0, 175, 42, 1, 0, 0, 0, 176, 177, 7, 4, 0, 0, 177, 178, 7, 5, 0,
        0, 178, 179, 7, 6, 0, 0, 179, 44, 1, 0, 0, 0, 180, 181, 7, 16, 0, 0, 181,
        182, 7, 4, 0, 0, 182, 183, 7, 17, 0, 0, 183, 46, 1, 0, 0, 0, 184, 185,
        7, 5, 0, 0, 185, 186, 7, 12, 0, 0, 186, 48, 1, 0, 0, 0, 187, 188, 7, 18,
        0, 0, 188, 189, 7, 16, 0, 0, 189, 190, 7, 11, 0, 0, 190, 50, 1, 0, 0, 0,
        191, 192, 7, 18, 0, 0, 192, 193, 7, 16, 0, 0, 193, 194, 7, 11, 0, 0, 194,
        195, 7, 16, 0, 0, 195, 196, 7, 4, 0, 0, 196, 197, 7, 19, 0, 0, 197, 52,
        1, 0, 0, 0, 198, 199, 7, 18, 0, 0, 199, 200, 7, 16, 0, 0, 200, 201, 7,
        11, 0, 0, 201, 202, 7, 16, 0, 0, 202, 203, 7, 0, 0, 0, 203, 204, 7, 0,
        0, 0, 204, 54, 1, 0, 0, 0, 205, 206, 7, 18, 0, 0, 206, 207, 7, 16, 0, 0,
        207, 208, 7, 11, 0, 0, 208, 209, 7, 4, 0, 0, 209, 210, 7, 5, 0, 0, 210,
        211, 7, 4, 0, 0, 211, 212, 7, 3, 0, 0, 212, 56, 1, 0, 0, 0, 213, 214, 7,
        6, 0, 0, 214, 215, 7, 12, 0, 0, 215, 216, 7, 20, 0, 0, 216, 223, 7, 3,
        0, 0, 217, 218, 7, 21, 0, 0, 218, 219, 7, 16, 0, 0, 219, 220, 7, 0, 0,
        0, 220, 221, 7, 11, 0, 0, 221, 223, 7, 3, 0, 0, 222, 213, 1, 0, 0, 0, 222,
        217, 1, 0, 0, 0, 223, 58, 1, 0, 0, 0, 224, 226, 3, 67, 33, 0, 225, 224,
        1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 225, 1, 0, 0, 0, 227, 228, 1, 0,
        0, 0, 228, 235, 1, 0, 0, 0, 229, 231, 5, 46, 0, 0, 230, 232, 3, 67, 33,
        0, 231, 230, 1, 0, 0, 0, 232, 233, 1, 0, 0, 0, 233, 231, 1, 0, 0, 0, 233,
        234, 1, 0, 0, 0, 234, 236, 1, 0, 0, 0, 235, 229, 1, 0, 0, 0, 235, 236,
        1, 0, 0, 0, 236, 60, 1, 0, 0, 0, 237, 243, 5, 34, 0, 0, 238, 242, 8, 22,
        0, 0, 239, 240, 5, 92, 0, 0, 240, 242, 9, 0, 0, 0, 241, 238, 1, 0, 0, 0,
        241, 239, 1, 0, 0, 0, 242, 245, 1, 0, 0, 0, 243, 241, 1, 0, 0, 0, 243,
        244, 1, 0, 0, 0, 244, 246, 1, 0, 0, 0, 245, 243, 1, 0, 0, 0, 246, 258,
        5, 34, 0, 0, 247, 253, 5, 39, 0, 0, 248, 252, 8, 23, 0, 0, 249, 250, 5,
        92, 0, 0, 250, 252, 9, 0, 0, 0, 251, 248, 1, 0, 0, 0, 251, 249, 1, 0, 0,
        0, 252, 255, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254,
        256, 1, 0, 0, 0, 255, 253, 1, 0, 0, 0, 256, 258, 5, 39, 0, 0, 257, 237,
        1, 0, 0, 0, 257, 247, 1, 0, 0, 0, 258, 62, 1, 0, 0, 0, 259, 263, 7, 24,
        0, 0, 260, 262, 7, 25, 0, 0, 261, 260, 1, 0, 0, 0, 262, 265, 1, 0, 0, 0,
        263, 261, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 64, 1, 0, 0, 0, 265, 263,
        1, 0, 0, 0, 266, 268, 7, 26, 0, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0,
        0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 271, 1, 0, 0, 0,
        271, 272, 6, 32, 0, 0, 272, 66, 1, 0, 0, 0, 273, 274, 7, 27, 0, 0, 274,
        68, 1, 0, 0, 0, 275, 277, 8, 28, 0, 0, 276, 275, 1, 0, 0, 0, 277, 278,
        1, 0, 0, 0, 278, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 70, 1, 0,
        0, 0, 18, 0, 84, 113, 132, 154, 171, 222, 227, 233, 235, 241, 243, 251,
        253, 257, 263, 269, 278, 1, 6, 0, 0,
    }
    deserializer := antlr.NewATNDeserializer(nil)
    staticData.atn = deserializer.Deserialize(staticData.serializedATN)
    atn := staticData.atn
    staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
    decisionToDFA := staticData.decisionToDFA
    for index, state := range atn.DecisionToState {
        decisionToDFA[index] = antlr.NewDFA(state, index)
    }
}

// FilterQueryLexerInit initializes any static state used to implement FilterQueryLexer. By default the
// static state used to implement the lexer is lazily initialized during the first call to
// NewFilterQueryLexer(). You can call this function if you wish to initialize the static state ahead
// of time.
func FilterQueryLexerInit() {
    staticData := &FilterQueryLexerLexerStaticData
    staticData.once.Do(filterquerylexerLexerInit)
}

// NewFilterQueryLexer produces a new lexer instance for the optional input antlr.CharStream.
func NewFilterQueryLexer(input antlr.CharStream) *FilterQueryLexer {
    FilterQueryLexerInit()
    l := new(FilterQueryLexer)
    l.BaseLexer = antlr.NewBaseLexer(input)
    staticData := &FilterQueryLexerLexerStaticData
    l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache)
    l.channelNames = staticData.ChannelNames
    l.modeNames = staticData.ModeNames
    l.RuleNames = staticData.RuleNames
    l.LiteralNames = staticData.LiteralNames
    l.SymbolicNames = staticData.SymbolicNames
    l.GrammarFileName = "FilterQuery.g4"
    // TODO: l.EOF = antlr.TokenEOF

    return l
}

// FilterQueryLexer tokens.
const (
    FilterQueryLexerLPAREN      = 1
    FilterQueryLexerRPAREN      = 2
    FilterQueryLexerLBRACK      = 3
    FilterQueryLexerRBRACK      = 4
    FilterQueryLexerCOMMA       = 5
    FilterQueryLexerEQUALS      = 6
    FilterQueryLexerNOT_EQUALS  = 7
    FilterQueryLexerNEQ         = 8
    FilterQueryLexerLT          = 9
    FilterQueryLexerLE          = 10
    FilterQueryLexerGT          = 11
    FilterQueryLexerGE          = 12
    FilterQueryLexerLIKE        = 13
    FilterQueryLexerNOT_LIKE    = 14
    FilterQueryLexerILIKE       = 15
    FilterQueryLexerNOT_ILIKE   = 16
    FilterQueryLexerBETWEEN     = 17
    FilterQueryLexerEXISTS      = 18
    FilterQueryLexerREGEXP      = 19
    FilterQueryLexerCONTAINS    = 20
    FilterQueryLexerIN          = 21
    FilterQueryLexerNOT         = 22
    FilterQueryLexerAND         = 23
    FilterQueryLexerOR          = 24
    FilterQueryLexerHAS         = 25
    FilterQueryLexerHASANY      = 26
    FilterQueryLexerHASALL      = 27
    FilterQueryLexerHASNONE     = 28
    FilterQueryLexerBOOL        = 29
    FilterQueryLexerNUMBER      = 30
    FilterQueryLexerQUOTED_TEXT = 31
    FilterQueryLexerKEY         = 32
    FilterQueryLexerWS          = 33
    FilterQueryLexerFREETEXT    = 34
)
112
pkg/parser/grammar/filterquery_listener.go
Normal file
@@ -0,0 +1,112 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

// FilterQueryListener is a complete listener for a parse tree produced by FilterQueryParser.
type FilterQueryListener interface {
    antlr.ParseTreeListener

    // EnterQuery is called when entering the query production.
    EnterQuery(c *QueryContext)

    // EnterExpression is called when entering the expression production.
    EnterExpression(c *ExpressionContext)

    // EnterOrExpression is called when entering the orExpression production.
    EnterOrExpression(c *OrExpressionContext)

    // EnterAndExpression is called when entering the andExpression production.
    EnterAndExpression(c *AndExpressionContext)

    // EnterUnaryExpression is called when entering the unaryExpression production.
    EnterUnaryExpression(c *UnaryExpressionContext)

    // EnterPrimary is called when entering the primary production.
    EnterPrimary(c *PrimaryContext)

    // EnterComparison is called when entering the comparison production.
    EnterComparison(c *ComparisonContext)

    // EnterInClause is called when entering the inClause production.
    EnterInClause(c *InClauseContext)

    // EnterNotInClause is called when entering the notInClause production.
    EnterNotInClause(c *NotInClauseContext)

    // EnterValueList is called when entering the valueList production.
    EnterValueList(c *ValueListContext)

    // EnterFullText is called when entering the fullText production.
    EnterFullText(c *FullTextContext)

    // EnterFunctionCall is called when entering the functionCall production.
    EnterFunctionCall(c *FunctionCallContext)

    // EnterFunctionParamList is called when entering the functionParamList production.
    EnterFunctionParamList(c *FunctionParamListContext)

    // EnterFunctionParam is called when entering the functionParam production.
    EnterFunctionParam(c *FunctionParamContext)

    // EnterArray is called when entering the array production.
    EnterArray(c *ArrayContext)

    // EnterValue is called when entering the value production.
    EnterValue(c *ValueContext)

    // EnterKey is called when entering the key production.
    EnterKey(c *KeyContext)

    // ExitQuery is called when exiting the query production.
    ExitQuery(c *QueryContext)

    // ExitExpression is called when exiting the expression production.
    ExitExpression(c *ExpressionContext)

    // ExitOrExpression is called when exiting the orExpression production.
    ExitOrExpression(c *OrExpressionContext)

    // ExitAndExpression is called when exiting the andExpression production.
    ExitAndExpression(c *AndExpressionContext)

    // ExitUnaryExpression is called when exiting the unaryExpression production.
    ExitUnaryExpression(c *UnaryExpressionContext)

    // ExitPrimary is called when exiting the primary production.
    ExitPrimary(c *PrimaryContext)

    // ExitComparison is called when exiting the comparison production.
    ExitComparison(c *ComparisonContext)

    // ExitInClause is called when exiting the inClause production.
    ExitInClause(c *InClauseContext)

    // ExitNotInClause is called when exiting the notInClause production.
    ExitNotInClause(c *NotInClauseContext)

    // ExitValueList is called when exiting the valueList production.
    ExitValueList(c *ValueListContext)

    // ExitFullText is called when exiting the fullText production.
    ExitFullText(c *FullTextContext)

    // ExitFunctionCall is called when exiting the functionCall production.
    ExitFunctionCall(c *FunctionCallContext)

    // ExitFunctionParamList is called when exiting the functionParamList production.
    ExitFunctionParamList(c *FunctionParamListContext)

    // ExitFunctionParam is called when exiting the functionParam production.
    ExitFunctionParam(c *FunctionParamContext)

    // ExitArray is called when exiting the array production.
    ExitArray(c *ArrayContext)

    // ExitValue is called when exiting the value production.
    ExitValue(c *ValueContext)

    // ExitKey is called when exiting the key production.
    ExitKey(c *KeyContext)
}
3539
pkg/parser/grammar/filterquery_parser.go
Normal file
61
pkg/parser/grammar/filterquery_visitor.go
Normal file
@@ -0,0 +1,61 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

// A complete Visitor for a parse tree produced by FilterQueryParser.
type FilterQueryVisitor interface {
    antlr.ParseTreeVisitor

    // Visit a parse tree produced by FilterQueryParser#query.
    VisitQuery(ctx *QueryContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#expression.
    VisitExpression(ctx *ExpressionContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#orExpression.
    VisitOrExpression(ctx *OrExpressionContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#andExpression.
    VisitAndExpression(ctx *AndExpressionContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#unaryExpression.
    VisitUnaryExpression(ctx *UnaryExpressionContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#primary.
    VisitPrimary(ctx *PrimaryContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#comparison.
    VisitComparison(ctx *ComparisonContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#inClause.
    VisitInClause(ctx *InClauseContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#notInClause.
    VisitNotInClause(ctx *NotInClauseContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#valueList.
    VisitValueList(ctx *ValueListContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#fullText.
    VisitFullText(ctx *FullTextContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#functionCall.
    VisitFunctionCall(ctx *FunctionCallContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#functionParamList.
    VisitFunctionParamList(ctx *FunctionParamListContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#functionParam.
    VisitFunctionParam(ctx *FunctionParamContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#array.
    VisitArray(ctx *ArrayContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#value.
    VisitValue(ctx *ValueContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#key.
    VisitKey(ctx *KeyContext) interface{}
}
22
pkg/query-service/Dockerfile.multi-arch
Normal file
@@ -0,0 +1,22 @@
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"

FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG ARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*

COPY ./target/${OS}-${ARCH}/signoz-community /root/signoz-community
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz-community

ENTRYPOINT ["./signoz-community"]
CMD ["-config", "/root/config/prometheus.yml"]
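Note on usage: the placeholder default means this image deliberately fails to build until a real digest is supplied, e.g. `docker build -f pkg/query-service/Dockerfile.multi-arch --build-arg ALPINE_SHA=<digest-of-the-pinned-alpine-image> --build-arg ARCH=amd64 .` (an illustrative invocation; the signoz-community binary must already exist under ./target/linux-amd64/ for the COPY to succeed).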
@@ -21,6 +21,7 @@ import (
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/mailru/easyjson"
	"github.com/uptrace/bun"

	"github.com/google/uuid"
@@ -40,6 +41,7 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/app/logs"
	"github.com/SigNoz/signoz/pkg/query-service/app/resource"
	"github.com/SigNoz/signoz/pkg/query-service/app/services"
	"github.com/SigNoz/signoz/pkg/query-service/app/traces/smart"
	"github.com/SigNoz/signoz/pkg/query-service/app/traces/tracedetail"
	"github.com/SigNoz/signoz/pkg/query-service/common"
	"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -6791,3 +6793,262 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, metric

	return cachedMetadata, nil
}

func (r *ClickHouseReader) SearchTracesV2(ctx context.Context, params *model.SearchTracesParams) (*[]model.SearchSpansResult, error) {
	searchSpansResult := []model.SearchSpansResult{
		{
			Columns:   []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError", "StatusMessage", "StatusCodeString", "SpanKind"},
			IsSubTree: false,
			Events:    make([][]interface{}, 0),
		},
	}

	var traceSummary model.TraceSummary
	summaryQuery := fmt.Sprintf("SELECT * from %s.%s WHERE trace_id=$1", r.TraceDB, r.traceSummaryTable)
	err := r.db.QueryRow(ctx, summaryQuery, params.TraceID).Scan(&traceSummary.TraceID, &traceSummary.Start, &traceSummary.End, &traceSummary.NumSpans)
	if err != nil {
		if err == sql.ErrNoRows {
			return &searchSpansResult, nil
		}
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query")
	}

	if traceSummary.NumSpans > uint64(params.MaxSpansInTrace) {
		zap.L().Error("Max spans allowed in a trace limit reached", zap.Int("MaxSpansInTrace", params.MaxSpansInTrace),
			zap.Uint64("Count", traceSummary.NumSpans))
		claims, ok := authtypes.ClaimsFromContext(ctx)
		if ok {
			data := map[string]interface{}{
				"traceSize":            traceSummary.NumSpans,
				"maxSpansInTraceLimit": params.MaxSpansInTrace,
				"algo":                 "smart",
			}
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_MAX_SPANS_ALLOWED_LIMIT_REACHED, data, claims.Email, true, false)
		}
		return nil, fmt.Errorf("max spans allowed in trace limit reached, please contact support for more details")
	}

	claims, ok := authtypes.ClaimsFromContext(ctx)
	if ok {
		data := map[string]interface{}{
			"traceSize": traceSummary.NumSpans,
			"algo":      "smart",
		}
		telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_TRACE_DETAIL_API, data, claims.Email, true, false)
	}

	var startTime, endTime, durationNano uint64
	var searchScanResponses []model.SpanItemV2

	query := fmt.Sprintf("SELECT timestamp, duration_nano, span_id, trace_id, has_error, kind, resource_string_service$$name, name, references, attributes_string, attributes_number, attributes_bool, resources_string, events, status_message, status_code_string, kind_string FROM %s.%s WHERE trace_id=$1 and ts_bucket_start>=$2 and ts_bucket_start<=$3", r.TraceDB, r.traceTableName)

	start := time.Now()

	err = r.db.Select(ctx, &searchScanResponses, query, params.TraceID, strconv.FormatInt(traceSummary.Start.Unix()-1800, 10), strconv.FormatInt(traceSummary.End.Unix(), 10))

	zap.L().Info(query)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query")
	}
	end := time.Now()
	zap.L().Debug("getTraceSQLQuery took: ", zap.Duration("duration", end.Sub(start)))

	searchSpansResult[0].Events = make([][]interface{}, len(searchScanResponses))

	searchSpanResponses := []model.SearchSpanResponseItem{}
	start = time.Now()
	for _, item := range searchScanResponses {
		ref := []model.OtelSpanRef{}
		err := json.Unmarshal([]byte(item.References), &ref)
		if err != nil {
			zap.L().Error("Error unmarshalling references", zap.Error(err))
			return nil, err
		}

		// merge attributes_number and attributes_bool to attributes_string
		for k, v := range item.Attributes_bool {
			item.Attributes_string[k] = fmt.Sprintf("%v", v)
		}
		for k, v := range item.Attributes_number {
			item.Attributes_string[k] = fmt.Sprintf("%v", v)
		}
		for k, v := range item.Resources_string {
			item.Attributes_string[k] = v
		}

		jsonItem := model.SearchSpanResponseItem{
			SpanID:           item.SpanID,
			TraceID:          item.TraceID,
			ServiceName:      item.ServiceName,
			Name:             item.Name,
			Kind:             int32(item.Kind),
			DurationNano:     int64(item.DurationNano),
			HasError:         item.HasError,
			StatusMessage:    item.StatusMessage,
			StatusCodeString: item.StatusCodeString,
			SpanKind:         item.SpanKind,
			References:       ref,
			Events:           item.Events,
			TagMap:           item.Attributes_string,
		}

		jsonItem.TimeUnixNano = uint64(item.TimeUnixNano.UnixNano() / 1000000)

		searchSpanResponses = append(searchSpanResponses, jsonItem)
		if startTime == 0 || jsonItem.TimeUnixNano < startTime {
			startTime = jsonItem.TimeUnixNano
		}
		if endTime == 0 || jsonItem.TimeUnixNano > endTime {
			endTime = jsonItem.TimeUnixNano
		}
		if durationNano == 0 || uint64(jsonItem.DurationNano) > durationNano {
			durationNano = uint64(jsonItem.DurationNano)
		}
	}
	end = time.Now()
	zap.L().Debug("getTraceSQLQuery unmarshal took: ", zap.Duration("duration", end.Sub(start)))

	if len(searchScanResponses) > params.SpansRenderLimit {
		start = time.Now()
		searchSpansResult, err = smart.SmartTraceAlgorithm(searchSpanResponses, params.SpanID, params.LevelUp, params.LevelDown, params.SpansRenderLimit)
		if err != nil {
			return nil, err
		}
		end = time.Now()
		zap.L().Debug("smartTraceAlgo took: ", zap.Duration("duration", end.Sub(start)))
		claims, ok := authtypes.ClaimsFromContext(ctx)
		if ok {
			data := map[string]interface{}{
				"traceSize":        len(searchScanResponses),
				"spansRenderLimit": params.SpansRenderLimit,
				"algo":             "smart",
			}
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LARGE_TRACE_OPENED, data, claims.Email, true, false)
		}
	} else {
		for i, item := range searchSpanResponses {
			spanEvents := item.GetValues()
			searchSpansResult[0].Events[i] = spanEvents
		}
	}

	searchSpansResult[0].StartTimestampMillis = startTime - (durationNano / 1000000)
	searchSpansResult[0].EndTimestampMillis = endTime + (durationNano / 1000000)

	return &searchSpansResult, nil
}

func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.SearchTracesParams) (*[]model.SearchSpansResult, error) {

	if r.useTraceNewSchema {
		return r.SearchTracesV2(ctx, params)
	}

	var countSpans uint64
	countQuery := fmt.Sprintf("SELECT count() as count from %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
	err := r.db.QueryRow(ctx, countQuery, params.TraceID).Scan(&countSpans)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query")
	}

	if countSpans > uint64(params.MaxSpansInTrace) {
		zap.L().Error("Max spans allowed in a trace limit reached", zap.Int("MaxSpansInTrace", params.MaxSpansInTrace),
			zap.Uint64("Count", countSpans))
		claims, ok := authtypes.ClaimsFromContext(ctx)
		if ok {
			data := map[string]interface{}{
				"traceSize":            countSpans,
				"maxSpansInTraceLimit": params.MaxSpansInTrace,
				"algo":                 "smart",
			}
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_MAX_SPANS_ALLOWED_LIMIT_REACHED, data, claims.Email, true, false)
		}
		return nil, fmt.Errorf("max spans allowed in trace limit reached, please contact support for more details")
	}

	claims, ok := authtypes.ClaimsFromContext(ctx)
	if ok {
		data := map[string]interface{}{
			"traceSize": countSpans,
			"algo":      "smart",
		}
		telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_TRACE_DETAIL_API, data, claims.Email, true, false)
	}

	var startTime, endTime, durationNano uint64
	var searchScanResponses []model.SearchSpanDBResponseItem

	query := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)

	start := time.Now()

	err = r.db.Select(ctx, &searchScanResponses, query, params.TraceID)

	zap.L().Info(query)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query")
	}
	end := time.Now()
	zap.L().Debug("getTraceSQLQuery took: ", zap.Duration("duration", end.Sub(start)))
	searchSpansResult := []model.SearchSpansResult{{
		Columns:   []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError", "StatusMessage", "StatusCodeString", "SpanKind"},
		Events:    make([][]interface{}, len(searchScanResponses)),
		IsSubTree: false,
	},
	}

	searchSpanResponses := []model.SearchSpanResponseItem{}
	start = time.Now()
	for _, item := range searchScanResponses {
		var jsonItem model.SearchSpanResponseItem
		easyjson.Unmarshal([]byte(item.Model), &jsonItem)
		jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano() / 1000000)
		searchSpanResponses = append(searchSpanResponses, jsonItem)
		if startTime == 0 || jsonItem.TimeUnixNano < startTime {
			startTime = jsonItem.TimeUnixNano
		}
		if endTime == 0 || jsonItem.TimeUnixNano > endTime {
			endTime = jsonItem.TimeUnixNano
		}
		if durationNano == 0 || uint64(jsonItem.DurationNano) > durationNano {
			durationNano = uint64(jsonItem.DurationNano)
		}
	}
	end = time.Now()
	zap.L().Debug("getTraceSQLQuery unmarshal took: ", zap.Duration("duration", end.Sub(start)))

	if len(searchScanResponses) > params.SpansRenderLimit {
		start = time.Now()
		searchSpansResult, err = smart.SmartTraceAlgorithm(searchSpanResponses, params.SpanID, params.LevelUp, params.LevelDown, params.SpansRenderLimit)
		if err != nil {
			return nil, err
		}
		end = time.Now()
		zap.L().Debug("smartTraceAlgo took: ", zap.Duration("duration", end.Sub(start)))
		claims, ok := authtypes.ClaimsFromContext(ctx)
		if ok {
			data := map[string]interface{}{
				"traceSize":        len(searchScanResponses),
				"spansRenderLimit": params.SpansRenderLimit,
				"algo":             "smart",
			}
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LARGE_TRACE_OPENED, data, claims.Email, true, false)
		}
	} else {
		for i, item := range searchSpanResponses {
			spanEvents := item.GetValues()
			searchSpansResult[0].Events[i] = spanEvents
		}
	}

	searchSpansResult[0].StartTimestampMillis = startTime - (durationNano / 1000000)
	searchSpansResult[0].EndTimestampMillis = endTime + (durationNano / 1000000)

	return &searchSpansResult, nil
}
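A hypothetical call site for the reader above (the parameter names mirror model.SearchTracesParams as used in the code; the values and surrounding wiring are illustrative only):

// Illustrative only: construct search params and fetch a trace. SearchTraces
// dispatches to SearchTracesV2 when the new trace schema is enabled.
func fetchTrace(ctx context.Context, reader *ClickHouseReader) error {
    params := &model.SearchTracesParams{
        TraceID:          "0123456789abcdef0123456789abcdef",
        MaxSpansInTrace:  250000, // guard against pathological traces
        SpansRenderLimit: 2500,   // above this, the smart algorithm trims the tree
    }
    spans, err := reader.SearchTraces(ctx, params)
    if err != nil {
        return err // e.g. the trace exceeded MaxSpansInTrace
    }
    fmt.Printf("got %d result set(s)\n", len(*spans))
    return nil
}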
@@ -8,6 +8,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/model"
|
||||
"github.com/SigNoz/signoz/pkg/sqlstore"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
@@ -27,7 +28,7 @@ func validateCloudProviderName(name string) *model.ApiError {
|
||||
|
||||
type Controller struct {
|
||||
accountsRepo cloudProviderAccountsRepository
|
||||
serviceConfigRepo serviceConfigRepository
|
||||
serviceConfigRepo ServiceConfigDatabase
|
||||
}
|
||||
|
||||
func NewController(sqlStore sqlstore.SQLStore) (
|
||||
@@ -200,7 +201,7 @@ type AgentCheckInResponse struct {
|
||||
type IntegrationConfigForAgent struct {
|
||||
EnabledRegions []string `json:"enabled_regions"`
|
||||
|
||||
TelemetryCollectionStrategy *CloudTelemetryCollectionStrategy `json:"telemetry,omitempty"`
|
||||
TelemetryCollectionStrategy *CompiledCollectionStrategy `json:"telemetry,omitempty"`
|
||||
}
|
||||
|
||||
func (c *Controller) CheckInAsAgent(
|
||||
@@ -239,7 +240,7 @@ func (c *Controller) CheckInAsAgent(
|
||||
}
|
||||
|
||||
// prepare and return integration config to be consumed by agent
|
||||
telemetryCollectionStrategy, err := NewCloudTelemetryCollectionStrategy(cloudProvider)
|
||||
compliedStrategy, err := NewCompiledCollectionStrategy(cloudProvider)
|
||||
if err != nil {
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"couldn't init telemetry collection strategy: %w", err,
|
||||
@@ -248,21 +249,17 @@ func (c *Controller) CheckInAsAgent(
|
||||
|
||||
agentConfig := IntegrationConfigForAgent{
|
||||
EnabledRegions: []string{},
|
||||
TelemetryCollectionStrategy: telemetryCollectionStrategy,
|
||||
TelemetryCollectionStrategy: compliedStrategy,
|
||||
}
|
||||
|
||||
if account.Config != nil && account.Config.EnabledRegions != nil {
|
||||
agentConfig.EnabledRegions = account.Config.EnabledRegions
|
||||
}
|
||||
|
||||
services, apiErr := listCloudProviderServices(cloudProvider)
|
||||
services, apiErr := services.Map(cloudProvider)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
|
||||
}
|
||||
svcDetailsById := map[string]*CloudServiceDetails{}
|
||||
for _, svcDetails := range services {
|
||||
svcDetailsById[svcDetails.Id] = &svcDetails
|
||||
}
|
||||
|
||||
svcConfigs, apiErr := c.serviceConfigRepo.getAllForAccount(
|
||||
ctx, cloudProvider, *account.CloudAccountId,
|
||||
@@ -278,22 +275,16 @@ func (c *Controller) CheckInAsAgent(
|
||||
slices.Sort(configuredSvcIds)
|
||||
|
||||
for _, svcId := range configuredSvcIds {
|
||||
svcDetails := svcDetailsById[svcId]
|
||||
svcConfig := svcConfigs[svcId]
|
||||
definition, ok := services[svcId]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
config := svcConfigs[svcId]
|
||||
|
||||
if svcDetails != nil {
|
||||
metricsEnabled := svcConfig.Metrics != nil && svcConfig.Metrics.Enabled
|
||||
logsEnabled := svcConfig.Logs != nil && svcConfig.Logs.Enabled
|
||||
if logsEnabled || metricsEnabled {
|
||||
err := agentConfig.TelemetryCollectionStrategy.AddServiceStrategy(
|
||||
svcDetails.TelemetryCollectionStrategy, logsEnabled, metricsEnabled,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, model.InternalError(fmt.Errorf(
|
||||
"couldn't add service telemetry collection strategy: %w", err,
|
||||
))
|
||||
}
|
||||
}
|
||||
err := AddServiceStrategy(compliedStrategy, definition.Strategy, config)
|
||||
if err != nil {
|
||||
return nil, model.InternalError(
|
||||
fmt.Errorf("couldn't add service telemetry collection strategy: %s", err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -355,7 +346,7 @@ func (c *Controller) DisconnectAccount(
|
||||
}
|
||||
|
||||
type ListServicesResponse struct {
|
||||
Services []CloudServiceSummary `json:"services"`
|
||||
Services []ServiceSummary `json:"services"`
|
||||
}
|
||||
|
||||
func (c *Controller) ListServices(
|
||||
@@ -363,17 +354,16 @@ func (c *Controller) ListServices(
|
||||
cloudProvider string,
|
||||
cloudAccountId *string,
|
||||
) (*ListServicesResponse, *model.ApiError) {
|
||||
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
services, apiErr := listCloudProviderServices(cloudProvider)
|
||||
definitions, apiErr := services.List(cloudProvider)
|
||||
if apiErr != nil {
|
||||
return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
|
||||
}
|
||||
|
||||
svcConfigs := map[string]*CloudServiceConfig{}
|
||||
svcConfigs := map[string]*ServiceConfig{}
|
||||
if cloudAccountId != nil {
|
||||
svcConfigs, apiErr = c.serviceConfigRepo.getAllForAccount(
|
||||
ctx, cloudProvider, *cloudAccountId,
|
||||
@@ -385,9 +375,11 @@ func (c *Controller) ListServices(
|
||||
}
|
||||
}
|
||||
|
||||
summaries := []CloudServiceSummary{}
|
||||
for _, s := range services {
|
||||
summary := s.CloudServiceSummary
|
||||
summaries := []ServiceSummary{}
|
||||
for _, def := range definitions {
|
||||
summary := ServiceSummary{
|
||||
Metadata: def.Metadata,
|
||||
}
|
||||
summary.Config = svcConfigs[summary.Id]
|
||||
|
||||
summaries = append(summaries, summary)
|
||||
@@ -403,17 +395,21 @@ func (c *Controller) GetServiceDetails(
|
||||
cloudProvider string,
|
||||
serviceId string,
|
||||
cloudAccountId *string,
|
||||
) (*CloudServiceDetails, *model.ApiError) {
|
||||
) (*ServiceDetails, *model.ApiError) {
|
||||
|
||||
if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
service, apiErr := getCloudProviderService(cloudProvider, serviceId)
|
||||
definition, apiErr := services.GetServiceDefinition(cloudProvider, serviceId)
|
||||
if apiErr != nil {
|
||||
return nil, apiErr
|
||||
}
|
||||
|
||||
details := ServiceDetails{
|
||||
Definition: *definition,
|
||||
}
|
||||
|
||||
if cloudAccountId != nil {
|
||||
config, apiErr := c.serviceConfigRepo.get(
|
||||
ctx, cloudProvider, *cloudAccountId, serviceId,
|
||||
@@ -423,15 +419,15 @@ func (c *Controller) GetServiceDetails(
|
||||
}
|
||||
|
||||
if config != nil {
|
||||
service.Config = config
|
||||
details.Config = config
|
||||
|
||||
if config.Metrics != nil && config.Metrics.Enabled {
|
||||
// add links to service dashboards, making them clickable.
|
||||
for i, d := range service.Assets.Dashboards {
|
||||
for i, d := range details.Assets.Dashboards {
|
||||
dashboardUuid := c.dashboardUuid(
|
||||
cloudProvider, serviceId, d.Id,
|
||||
)
|
||||
service.Assets.Dashboards[i].Url = fmt.Sprintf(
|
||||
details.Assets.Dashboards[i].Url = fmt.Sprintf(
|
||||
"/dashboard/%s", dashboardUuid,
|
||||
)
|
||||
}
|
||||
@@ -439,17 +435,17 @@ func (c *Controller) GetServiceDetails(
|
||||
}
|
||||
}
|
||||
|
||||
return service, nil
|
||||
return &details, nil
|
||||
}
|
||||
|
||||
type UpdateServiceConfigRequest struct {
	CloudAccountId string `json:"cloud_account_id"`
	Config CloudServiceConfig `json:"config"`
	CloudAccountId string `json:"cloud_account_id"`
	Config ServiceConfig `json:"config"`
}

type UpdateServiceConfigResponse struct {
	Id string `json:"id"`
	Config CloudServiceConfig `json:"config"`
	Id string `json:"id"`
	Config ServiceConfig `json:"config"`
}

func (c *Controller) UpdateServiceConfig(
@@ -458,7 +454,6 @@ func (c *Controller) UpdateServiceConfig(
	serviceId string,
	req UpdateServiceConfigRequest,
) (*UpdateServiceConfigResponse, *model.ApiError) {

	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}
@@ -472,7 +467,7 @@ func (c *Controller) UpdateServiceConfig(
	}

	// can only update config for a valid service.
	_, apiErr = getCloudProviderService(cloudProvider, serviceId)
	_, apiErr = services.GetServiceDefinition(cloudProvider, serviceId)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "unsupported service")
	}
@@ -540,7 +535,7 @@ func (c *Controller) AvailableDashboardsForCloudProvider(
		}
	}

	allServices, apiErr := listCloudProviderServices(cloudProvider)
	allServices, apiErr := services.List(cloudProvider)
	if apiErr != nil {
		return nil, apiErr
	}

@@ -182,8 +182,8 @@ func TestConfigureService(t *testing.T) {
	require.NotNil(testConnectedAccount.CloudAccountId)
	require.Equal(testCloudAccountId, *testConnectedAccount.CloudAccountId)

	testSvcConfig := CloudServiceConfig{
		Metrics: &CloudServiceMetricsConfig{
	testSvcConfig := ServiceConfig{
		Metrics: &MetricsConfig{
			Enabled: true,
		},
	}

@@ -6,9 +6,22 @@ import (
	"fmt"
	"time"

	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
)

type ServiceSummary struct {
	services.Metadata

	Config *ServiceConfig `json:"config"`
}

type ServiceDetails struct {
	services.Definition

	Config           *ServiceConfig           `json:"config"`
	ConnectionStatus *ServiceConnectionStatus `json:"status,omitempty"`
}

// Represents a cloud provider account for cloud integrations
type AccountRecord struct {
	CloudProvider string `json:"cloud_provider" db:"cloud_provider"`
@@ -118,39 +131,13 @@ func (a *AccountRecord) account() Account {
	return ca
}
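
Because ServiceSummary embeds services.Metadata without a json tag, the metadata fields are promoted and serialized inline next to "config". A minimal sketch of the resulting wire shape, assuming the types above (the field values are illustrative only):

	s := ServiceSummary{
		Metadata: services.Metadata{Id: "s3sync", Title: "S3 Sync"},
		Config:   &ServiceConfig{Logs: &LogsConfig{Enabled: true}},
	}
	b, _ := json.Marshal(s)
	fmt.Println(string(b))
	// {"id":"s3sync","title":"S3 Sync","icon":"","config":{"logs":{"enabled":true}}}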

type CloudServiceSummary struct {
	Id    string `json:"id"`
	Title string `json:"title"`
	Icon  string `json:"icon"`

	// Present only if the service has been configured in the
	// context of a cloud provider account.
	Config *CloudServiceConfig `json:"config,omitempty"`
type ServiceConfig struct {
	Logs    *LogsConfig    `json:"logs,omitempty"`
	Metrics *MetricsConfig `json:"metrics,omitempty"`
}

type CloudServiceDetails struct {
	CloudServiceSummary

	Overview string `json:"overview"` // markdown

	Assets CloudServiceAssets `json:"assets"`

	SupportedSignals SupportedSignals `json:"supported_signals"`

	DataCollected DataCollectedForService `json:"data_collected"`

	ConnectionStatus *CloudServiceConnectionStatus `json:"status,omitempty"`

	TelemetryCollectionStrategy *CloudTelemetryCollectionStrategy `json:"telemetry_collection_strategy"`
}

type CloudServiceConfig struct {
	Logs    *CloudServiceLogsConfig    `json:"logs,omitempty"`
	Metrics *CloudServiceMetricsConfig `json:"metrics,omitempty"`
}

// For serializing from db
func (c *CloudServiceConfig) Scan(src any) error {
// Serialization from db
func (c *ServiceConfig) Scan(src any) error {
	data, ok := src.([]byte)
	if !ok {
		return fmt.Errorf("tried to scan from %T instead of bytes", src)
@@ -159,8 +146,8 @@ func (c *CloudServiceConfig) Scan(src any) error {
	return json.Unmarshal(data, &c)
}

// For serializing to db
func (c *CloudServiceConfig) Value() (driver.Value, error) {
// Serializing to db
func (c *ServiceConfig) Value() (driver.Value, error) {
	if c == nil {
		return nil, nil
	}
@@ -174,51 +161,16 @@ func (c *CloudServiceConfig) Value() (driver.Value, error) {
	return serialized, nil
}
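
The Scan/Value pair lets ServiceConfig be used directly as a column value with database/sql: Value serializes the struct to JSON on writes, Scan parses the stored bytes back on reads. A minimal round-trip sketch, assuming the types above (the test name is illustrative):

	func TestServiceConfigRoundTrip(t *testing.T) {
		in := ServiceConfig{Metrics: &MetricsConfig{Enabled: true}}

		v, err := in.Value() // JSON bytes handed to the sql driver
		if err != nil {
			t.Fatal(err)
		}

		var out ServiceConfig
		if err := out.Scan(v); err != nil { // Scan expects []byte from the driver
			t.Fatal(err)
		}
		if out.Metrics == nil || !out.Metrics.Enabled {
			t.Fatalf("config did not survive the round trip: %+v", out)
		}
	}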

type CloudServiceLogsConfig struct {
type LogsConfig struct {
	Enabled   bool                `json:"enabled"`
	S3Buckets map[string][]string `json:"s3_buckets,omitempty"`
}

type MetricsConfig struct {
	Enabled bool `json:"enabled"`
}

type CloudServiceMetricsConfig struct {
	Enabled bool `json:"enabled"`
}

type CloudServiceAssets struct {
	Dashboards []CloudServiceDashboard `json:"dashboards"`
}

type CloudServiceDashboard struct {
	Id          string               `json:"id"`
	Url         string               `json:"url"`
	Title       string               `json:"title"`
	Description string               `json:"description"`
	Image       string               `json:"image"`
	Definition  *types.DashboardData `json:"definition,omitempty"`
}

type SupportedSignals struct {
	Logs    bool `json:"logs"`
	Metrics bool `json:"metrics"`
}

type DataCollectedForService struct {
	Logs    []CollectedLogAttribute `json:"logs"`
	Metrics []CollectedMetric       `json:"metrics"`
}

type CollectedLogAttribute struct {
	Name string `json:"name"`
	Path string `json:"path"`
	Type string `json:"type"`
}

type CollectedMetric struct {
	Name        string `json:"name"`
	Type        string `json:"type"`
	Unit        string `json:"unit"`
	Description string `json:"description"`
}

type CloudServiceConnectionStatus struct {
type ServiceConnectionStatus struct {
	Logs    *SignalConnectionStatus `json:"logs"`
	Metrics *SignalConnectionStatus `json:"metrics"`
}
@@ -228,23 +180,14 @@ type SignalConnectionStatus struct {
	LastReceivedFrom string `json:"last_received_from"` // resource identifier
}

type CloudTelemetryCollectionStrategy struct {
	Provider string `json:"provider"`
type CompiledCollectionStrategy = services.CollectionStrategy

	AWSMetrics *AWSMetricsCollectionStrategy `json:"aws_metrics,omitempty"`
	AWSLogs    *AWSLogsCollectionStrategy    `json:"aws_logs,omitempty"`
}

func NewCloudTelemetryCollectionStrategy(provider string) (*CloudTelemetryCollectionStrategy, error) {
func NewCompiledCollectionStrategy(provider string) (*CompiledCollectionStrategy, error) {
	if provider == "aws" {
		return &CloudTelemetryCollectionStrategy{
			Provider: "aws",
			AWSMetrics: &AWSMetricsCollectionStrategy{
				CloudwatchMetricsStreamFilters: []CloudwatchMetricStreamFilter{},
			},
			AWSLogs: &AWSLogsCollectionStrategy{
				CloudwatchLogsSubscriptions: []CloudwatchLogsSubscriptionConfig{},
			},
		return &CompiledCollectionStrategy{
			Provider:   "aws",
			AWSMetrics: &services.AWSMetricsStrategy{},
			AWSLogs:    &services.AWSLogsStrategy{},
		}, nil
	}

@@ -252,24 +195,27 @@ func NewCloudTelemetryCollectionStrategy(provider string) (*CloudTelemetryCollec
}

// Helper for accumulating strategies for enabled services.
func (cs *CloudTelemetryCollectionStrategy) AddServiceStrategy(
	svcStrategy *CloudTelemetryCollectionStrategy,
	logsEnabled bool,
	metricsEnabled bool,
) error {
	if svcStrategy.Provider != cs.Provider {
func AddServiceStrategy(cs *CompiledCollectionStrategy,
	definitionStrat *services.CollectionStrategy, config *ServiceConfig) error {
	if definitionStrat.Provider != cs.Provider {
		return fmt.Errorf(
			"can't add %s service strategy to strategy for %s",
			svcStrategy.Provider, cs.Provider,
			definitionStrat.Provider, cs.Provider,
		)
	}

	if cs.Provider == "aws" {
		if logsEnabled {
			cs.AWSLogs.AddServiceStrategy(svcStrategy.AWSLogs)
		if config.Logs != nil && config.Logs.Enabled && definitionStrat.AWSLogs != nil {
			cs.AWSLogs.Subscriptions = append(
				cs.AWSLogs.Subscriptions,
				definitionStrat.AWSLogs.Subscriptions...,
			)
		}
		if metricsEnabled {
			cs.AWSMetrics.AddServiceStrategy(svcStrategy.AWSMetrics)
		if config.Metrics != nil && config.Metrics.Enabled && definitionStrat.AWSMetrics != nil {
			cs.AWSMetrics.StreamFilters = append(
				cs.AWSMetrics.StreamFilters,
				definitionStrat.AWSMetrics.StreamFilters...,
			)
		}
		return nil
	}
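
A sketch of how the new free function composes one account-level strategy from per-service definitions; it assumes `definitions` from services.List and `svcConfigs` from the service-config repository, as in ListServices above:

	compiled, err := NewCompiledCollectionStrategy("aws")
	if err != nil {
		return err
	}
	for _, def := range definitions {
		cfg := svcConfigs[def.Id]
		if cfg == nil {
			continue // service was never configured for this account
		}
		// appends the definition's log subscriptions and metric stream
		// filters only for the signals enabled in cfg
		if err := AddServiceStrategy(compiled, def.Strategy, cfg); err != nil {
			return err
		}
	}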

@@ -277,57 +223,3 @@ func (cs *CloudTelemetryCollectionStrategy) AddServiceStrategy(
	return fmt.Errorf("unsupported cloud provider: %s", cs.Provider)

}

type AWSMetricsCollectionStrategy struct {
	// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
	CloudwatchMetricsStreamFilters []CloudwatchMetricStreamFilter `json:"cloudwatch_metric_stream_filters"`
}

type CloudwatchMetricStreamFilter struct {
	// json tags here are in the shape expected by AWS API as detailed at
	// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
	Namespace   string   `json:"Namespace"`
	MetricNames []string `json:"MetricNames,omitempty"`
}

func (amc *AWSMetricsCollectionStrategy) AddServiceStrategy(
	svcStrategy *AWSMetricsCollectionStrategy,
) error {
	if svcStrategy == nil {
		return nil
	}

	amc.CloudwatchMetricsStreamFilters = append(
		amc.CloudwatchMetricsStreamFilters,
		svcStrategy.CloudwatchMetricsStreamFilters...,
	)
	return nil
}

type AWSLogsCollectionStrategy struct {
	CloudwatchLogsSubscriptions []CloudwatchLogsSubscriptionConfig `json:"cloudwatch_logs_subscriptions"`
}

type CloudwatchLogsSubscriptionConfig struct {
	// subscribe to all logs groups with specified prefix.
	// eg: `/aws/rds/`
	LogGroupNamePrefix string `json:"log_group_name_prefix"`

	// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
	// "" implies no filtering is required.
	FilterPattern string `json:"filter_pattern"`
}

func (alc *AWSLogsCollectionStrategy) AddServiceStrategy(
	svcStrategy *AWSLogsCollectionStrategy,
) error {
	if svcStrategy == nil {
		return nil
	}

	alc.CloudwatchLogsSubscriptions = append(
		alc.CloudwatchLogsSubscriptions,
		svcStrategy.CloudwatchLogsSubscriptions...,
	)
	return nil
}

@@ -9,28 +9,28 @@ import (
	"github.com/jmoiron/sqlx"
)

type serviceConfigRepository interface {
type ServiceConfigDatabase interface {
	get(
		ctx context.Context,
		cloudProvider string,
		cloudAccountId string,
		serviceId string,
	) (*CloudServiceConfig, *model.ApiError)
	) (*ServiceConfig, *model.ApiError)

	upsert(
		ctx context.Context,
		cloudProvider string,
		cloudAccountId string,
		serviceId string,
		config CloudServiceConfig,
	) (*CloudServiceConfig, *model.ApiError)
		config ServiceConfig,
	) (*ServiceConfig, *model.ApiError)

	getAllForAccount(
		ctx context.Context,
		cloudProvider string,
		cloudAccountId string,
	) (
		configsBySvcId map[string]*CloudServiceConfig,
		configsBySvcId map[string]*ServiceConfig,
		apiErr *model.ApiError,
	)
}
@@ -52,9 +52,9 @@ func (r *serviceConfigSQLRepository) get(
	cloudProvider string,
	cloudAccountId string,
	serviceId string,
) (*CloudServiceConfig, *model.ApiError) {
) (*ServiceConfig, *model.ApiError) {

	var result CloudServiceConfig
	var result ServiceConfig

	err := r.db.GetContext(
		ctx, &result, `
@@ -68,17 +68,13 @@ func (r *serviceConfigSQLRepository) get(
		`,
		cloudProvider, cloudAccountId, serviceId,
	)
	if err != nil {
		if err == sql.ErrNoRows {
			return nil, model.NotFoundError(
				fmt.Errorf("couldn't find %s %s config for %s", cloudProvider, serviceId, cloudAccountId))
		}

	if err == sql.ErrNoRows {
		return nil, model.NotFoundError(fmt.Errorf(
			"couldn't find %s %s config for %s",
			cloudProvider, serviceId, cloudAccountId,
		))

	} else if err != nil {
		return nil, model.InternalError(fmt.Errorf(
			"couldn't query cloud service config: %w", err,
		))
		return nil, model.InternalError(fmt.Errorf("couldn't query cloud service config: %w", err))
	}

	return &result, nil
@@ -90,8 +86,8 @@ func (r *serviceConfigSQLRepository) upsert(
	cloudProvider string,
	cloudAccountId string,
	serviceId string,
	config CloudServiceConfig,
) (*CloudServiceConfig, *model.ApiError) {
	config ServiceConfig,
) (*ServiceConfig, *model.ApiError) {

	query := `
		INSERT INTO cloud_integrations_service_configs (
@@ -128,11 +124,11 @@ func (r *serviceConfigSQLRepository) getAllForAccount(
	ctx context.Context,
	cloudProvider string,
	cloudAccountId string,
) (map[string]*CloudServiceConfig, *model.ApiError) {
) (map[string]*ServiceConfig, *model.ApiError) {

	type ScannedServiceConfigRecord struct {
		ServiceId string             `db:"service_id"`
		Config    CloudServiceConfig `db:"config_json"`
		ServiceId string        `db:"service_id"`
		Config    ServiceConfig `db:"config_json"`
	}

	records := []ScannedServiceConfigRecord{}
@@ -155,7 +151,7 @@ func (r *serviceConfigSQLRepository) getAllForAccount(
		))
	}

	result := map[string]*CloudServiceConfig{}
	result := map[string]*ServiceConfig{}

	for _, r := range records {
		result[r.ServiceId] = &r.Config
(12 binary image assets changed in this range; each file's dimensions and size are identical before and after: 125 KiB, 1.1 KiB, 198 KiB, 2.4 KiB, 94 KiB, 805 B, 131 KiB, 965 B, 371 KiB, 6.0 KiB, 63 KiB, 2.7 KiB.)
@@ -0,0 +1 @@
<svg width="256" height="256" viewBox="0 0 256 256" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" preserveAspectRatio="xMidYMid"><title>AWS Simple Storage Service (S3)</title><defs><linearGradient x1="0%" y1="100%" x2="100%" y2="0%" id="a"><stop stop-color="#1B660F" offset="0%"/><stop stop-color="#6CAE3E" offset="100%"/></linearGradient></defs><g><rect fill="url(#a)" width="256" height="256"/><path d="M194.67488,137.25632 L195.90368,128.60352 C207.23488,135.39072 207.38208,138.19392 207.378964,138.27072 C207.35968,138.28672 205.42688,139.89952 194.67488,137.25632 L194.67488,137.25632 Z M188.45728,135.52832 C168.87328,129.60192 141.59968,117.08992 130.56288,111.87392 C130.56288,111.82912 130.57568,111.78752 130.57568,111.74272 C130.57568,107.50272 127.12608,104.05312 122.88288,104.05312 C118.64608,104.05312 115.19648,107.50272 115.19648,111.74272 C115.19648,115.98272 118.64608,119.43232 122.88288,119.43232 C124.74528,119.43232 126.43488,118.73792 127.76928,117.63392 C140.75488,123.78112 167.81728,136.11072 187.54528,141.93472 L179.74368,196.99392 C179.72128,197.14432 179.71168,197.29472 179.71168,197.44512 C179.71168,202.29312 158.24928,211.19872 123.18048,211.19872 C87.74048,211.19872 66.05088,202.29312 66.05088,197.44512 C66.05088,197.29792 66.04128,197.15392 66.02208,197.00992 L49.72128,77.94752 C63.83008,87.65952 94.17568,92.79872 123.19968,92.79872 C152.17888,92.79872 182.47328,87.67872 196.61088,77.99552 L188.45728,135.52832 Z M47.99968,65.52832 C48.23008,61.31712 72.42848,44.79872 123.19968,44.79872 C173.96448,44.79872 198.16608,61.31392 198.39968,65.52832 L198.39968,66.96512 C195.61568,76.40832 164.25568,86.39872 123.19968,86.39872 C82.07328,86.39872 50.69728,76.37632 47.99968,66.92032 L47.99968,65.52832 Z M204.79968,65.59872 C204.79968,54.51072 173.01088,38.39872 123.19968,38.39872 C73.38848,38.39872 41.59968,54.51072 41.59968,65.59872 L41.90048,68.01152 L59.65408,197.68832 C60.07968,212.19072 98.75488,217.59872 123.18048,217.59872 C153.49088,217.59872 185.69248,210.62912 186.10848,197.69792 L193.77568,143.62752 C198.04128,144.64832 201.55168,145.16992 204.37088,145.16992 C208.15648,145.16992 210.71648,144.24512 212.26848,142.39552 C213.54208,140.87872 214.02848,139.04192 213.66368,137.08672 C212.83488,132.65792 207.57728,127.88352 196.87008,121.77472 L204.47328,68.13632 L204.79968,65.59872 Z" fill="#FFFFFF"/></g></svg>
(new file icon.svg: a 256x256 SVG, 2.3 KiB)
@@ -0,0 +1,49 @@
{
  "id": "s3sync",
  "title": "S3 Sync",
  "icon": "file://icon.svg",
  "overview": "file://overview.md",
  "supported_signals": {
    "metrics": false,
    "logs": true
  },
  "data_collected": {
    "logs": [
      {
        "name": "Account ID",
        "path": "resources.aws.account.id",
        "type": "string"
      },
      {
        "name": "Account Region",
        "path": "resources.aws.account.region",
        "type": "string"
      },
      {
        "name": "Bucket Name",
        "path": "resources.aws.bucket.name",
        "type": "string"
      },
      {
        "name": "Object Key",
        "path": "attributes.aws.object.key",
        "type": "string"
      },
      {
        "name": "Object S3 Event Time",
        "path": "attributes.aws.object.event_time",
        "type": "string"
      },
      {
        "name": "Log line number in source file",
        "path": "attributes.log.source.line",
        "type": "number"
      }
    ]
  },
  "telemetry_collection_strategy": {
  },
  "assets": {
    "dashboards": []
  }
}

@@ -0,0 +1,3 @@
### Sync logs stored in AWS S3 with SigNoz

Collect logs stored by AWS services in S3 and explore them in SigNoz.
94
pkg/query-service/app/cloudintegrations/services/models.go
Normal file
@@ -0,0 +1,94 @@
package services

import (
	"github.com/SigNoz/signoz/pkg/types"
)

type Metadata struct {
	Id    string `json:"id"`
	Title string `json:"title"`
	Icon  string `json:"icon"`

	// Present only if the service has been configured in the
	// context of a cloud provider account.
	// Config *CloudServiceConfig `json:"config,omitempty"`
}

type Definition struct {
	Metadata

	Overview string `json:"overview"` // markdown

	Assets Assets `json:"assets"`

	SupportedSignals SupportedSignals `json:"supported_signals"`

	DataCollected DataCollected `json:"data_collected"`

	Strategy *CollectionStrategy `json:"telemetry_collection_strategy"`
}

type Assets struct {
	Dashboards []Dashboard `json:"dashboards"`
}

type SupportedSignals struct {
	Logs    bool `json:"logs"`
	Metrics bool `json:"metrics"`
}

type DataCollected struct {
	Logs    []CollectedLogAttribute `json:"logs"`
	Metrics []CollectedMetric       `json:"metrics"`
}

type CollectedLogAttribute struct {
	Name string `json:"name"`
	Path string `json:"path"`
	Type string `json:"type"`
}

type CollectedMetric struct {
	Name        string `json:"name"`
	Type        string `json:"type"`
	Unit        string `json:"unit"`
	Description string `json:"description"`
}

type CollectionStrategy struct {
	Provider string `json:"provider"`

	AWSMetrics *AWSMetricsStrategy `json:"aws_metrics,omitempty"`
	AWSLogs    *AWSLogsStrategy    `json:"aws_logs,omitempty"`
}

type AWSMetricsStrategy struct {
	// to be used as https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudwatch-metricstream.html#cfn-cloudwatch-metricstream-includefilters
	StreamFilters []struct {
		// json tags here are in the shape expected by AWS API as detailed at
		// https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-metricstream-metricstreamfilter.html
		Namespace   string   `json:"Namespace"`
		MetricNames []string `json:"MetricNames,omitempty"`
	} `json:"cloudwatch_metric_stream_filters"`
}

type AWSLogsStrategy struct {
	Subscriptions []struct {
		// subscribe to all logs groups with specified prefix.
		// eg: `/aws/rds/`
		LogGroupNamePrefix string `json:"log_group_name_prefix"`

		// https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/FilterAndPatternSyntax.html
		// "" implies no filtering is required.
		FilterPattern string `json:"filter_pattern"`
	} `json:"cloudwatch_logs_subscriptions"`
}

type Dashboard struct {
	Id          string               `json:"id"`
	Url         string               `json:"url"`
	Title       string               `json:"title"`
	Description string               `json:"description"`
	Image       string               `json:"image"`
	Definition  *types.DashboardData `json:"definition,omitempty"`
}
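
These structs mirror the integration.json layout shown earlier: a definition file unmarshals straight into services.Definition through the json tags. A minimal sketch, assuming `raw` holds the bytes of the s3sync spec above with its file:// URIs already hydrated:

	var def services.Definition
	if err := json.Unmarshal(raw, &def); err != nil {
		return err
	}
	fmt.Println(def.Id, def.SupportedSignals.Logs) // "s3sync" true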

@@ -1,4 +1,4 @@
package cloudintegrations
package services

import (
	"bytes"
@@ -15,11 +15,11 @@ import (
	"golang.org/x/exp/maps"
)

func listCloudProviderServices(
func List(
	cloudProvider string,
) ([]CloudServiceDetails, *model.ApiError) {
	cloudServices := availableServices[cloudProvider]
	if cloudServices == nil {
) ([]Definition, *model.ApiError) {
	cloudServices, found := supportedServices[cloudProvider]
	if !found || cloudServices == nil {
		return nil, model.NotFoundError(fmt.Errorf(
			"unsupported cloud provider: %s", cloudProvider,
		))
@@ -33,10 +33,21 @@ func listCloudProviderServices(
	return services, nil
}

func getCloudProviderService(
func Map(cloudprovider string) (map[string]Definition, *model.ApiError) {
	cloudServices, found := supportedServices[cloudprovider]
	if !found || cloudServices == nil {
		return nil, model.NotFoundError(fmt.Errorf(
			"unsupported cloud provider: %s", cloudprovider,
		))
	}

	return cloudServices, nil
}

func GetServiceDefinition(
	cloudProvider string, serviceId string,
) (*CloudServiceDetails, *model.ApiError) {
	cloudServices := availableServices[cloudProvider]
) (*Definition, *model.ApiError) {
	cloudServices := supportedServices[cloudProvider]
	if cloudServices == nil {
		return nil, model.NotFoundError(fmt.Errorf(
			"unsupported cloud provider: %s", cloudProvider,
@@ -57,7 +68,7 @@ func getCloudProviderService(

// Service details read from ./serviceDefinitions
// { "providerName": { "service_id": {...}} }
var availableServices map[string]map[string]CloudServiceDetails
var supportedServices map[string]map[string]Definition

func init() {
	err := readAllServiceDefinitions()
@@ -68,15 +79,15 @@ func init() {
	}
}

//go:embed serviceDefinitions/*
var serviceDefinitionFiles embed.FS
//go:embed definitions/*
var definitionFiles embed.FS

func readAllServiceDefinitions() error {
	availableServices = map[string]map[string]CloudServiceDetails{}
	supportedServices = map[string]map[string]Definition{}

	rootDirName := "serviceDefinitions"
	rootDirName := "definitions"

	cloudProviderDirs, err := fs.ReadDir(serviceDefinitionFiles, rootDirName)
	cloudProviderDirs, err := fs.ReadDir(definitionFiles, rootDirName)
	if err != nil {
		return fmt.Errorf("couldn't read dirs in %s: %w", rootDirName, err)
	}
@@ -98,21 +109,21 @@ func readAllServiceDefinitions() error {
			return fmt.Errorf("no %s services could be read", cloudProvider)
		}

		availableServices[cloudProvider] = cloudServices
		supportedServices[cloudProvider] = cloudServices
	}

	return nil
}

func readServiceDefinitionsFromDir(cloudProvider string, cloudProviderDirPath string) (
	map[string]CloudServiceDetails, error,
	map[string]Definition, error,
) {
	svcDefDirs, err := fs.ReadDir(serviceDefinitionFiles, cloudProviderDirPath)
	svcDefDirs, err := fs.ReadDir(definitionFiles, cloudProviderDirPath)
	if err != nil {
		return nil, fmt.Errorf("couldn't list integrations dirs: %w", err)
	}

	svcDefs := map[string]CloudServiceDetails{}
	svcDefs := map[string]Definition{}

	for _, d := range svcDefDirs {
		if !d.IsDir() {
@@ -137,10 +148,10 @@ func readServiceDefinitionsFromDir(cloudProvider string, cloudProviderDirPath st
	return svcDefs, nil
}

func readServiceDefinition(cloudProvider string, svcDirpath string) (*CloudServiceDetails, error) {
func readServiceDefinition(cloudProvider string, svcDirpath string) (*Definition, error) {
	integrationJsonPath := path.Join(svcDirpath, "integration.json")

	serializedSpec, err := serviceDefinitionFiles.ReadFile(integrationJsonPath)
	serializedSpec, err := definitionFiles.ReadFile(integrationJsonPath)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't find integration.json in %s: %w",
@@ -157,7 +168,7 @@ func readServiceDefinition(cloudProvider string, svcDirpath string) (*CloudServi
	}

	hydrated, err := integrations.HydrateFileUris(
		integrationSpec, serviceDefinitionFiles, svcDirpath,
		integrationSpec, definitionFiles, svcDirpath,
	)
	if err != nil {
		return nil, fmt.Errorf(
@@ -167,7 +178,7 @@ func readServiceDefinition(cloudProvider string, svcDirpath string) (*CloudServi
	}
	hydratedSpec := hydrated.(map[string]any)

	serviceDef, err := ParseStructWithJsonTagsFromMap[CloudServiceDetails](hydratedSpec)
	serviceDef, err := ParseStructWithJsonTagsFromMap[Definition](hydratedSpec)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't parse hydrated JSON spec read from %s: %w",
@@ -180,13 +191,13 @@ func readServiceDefinition(cloudProvider string, svcDirpath string) (*CloudServi
		return nil, fmt.Errorf("invalid service definition %s: %w", serviceDef.Id, err)
	}

	serviceDef.TelemetryCollectionStrategy.Provider = cloudProvider
	serviceDef.Strategy.Provider = cloudProvider

	return serviceDef, nil

}

func validateServiceDefinition(s *CloudServiceDetails) error {
func validateServiceDefinition(s *Definition) error {
	// Validate dashboard data
	seenDashboardIds := map[string]interface{}{}
	for _, dd := range s.Assets.Dashboards {
@@ -196,7 +207,7 @@ func validateServiceDefinition(s *CloudServiceDetails) error {
		seenDashboardIds[dd.Id] = nil
	}

	if s.TelemetryCollectionStrategy == nil {
	if s.Strategy == nil {
		return fmt.Errorf("telemetry_collection_strategy is required")
	}
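
The go:embed directive above implies this on-disk layout under the services package (inferred from the embed pattern, the per-provider directory walk, and the file:// URIs in integration.json):

	// definitions/
	//     aws/
	//         s3sync/
	//             integration.json   // service spec, parsed into Definition
	//             icon.svg           // hydrated from "file://icon.svg"
	//             overview.md        // hydrated from "file://overview.md"
	//         ...one directory per service...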

@@ -1,4 +1,4 @@
package cloudintegrations
package services

import (
	"testing"
@@ -11,22 +11,22 @@ func TestAvailableServices(t *testing.T) {
	require := require.New(t)

	// should be able to list available services.
	_, apiErr := listCloudProviderServices("bad-cloud-provider")
	_, apiErr := List("bad-cloud-provider")
	require.NotNil(apiErr)
	require.Equal(model.ErrorNotFound, apiErr.Type())

	awsSvcs, apiErr := listCloudProviderServices("aws")
	awsSvcs, apiErr := List("aws")
	require.Nil(apiErr)
	require.Greater(len(awsSvcs), 0)

	// should be able to get details of a service
	_, apiErr = getCloudProviderService(
	_, apiErr = GetServiceDefinition(
		"aws", "bad-service-id",
	)
	require.NotNil(apiErr)
	require.Equal(model.ErrorNotFound, apiErr.Type())

	svc, apiErr := getCloudProviderService(
	svc, apiErr := GetServiceDefinition(
		"aws", awsSvcs[0].Id,
	)
	require.Nil(apiErr)
@@ -22,6 +22,7 @@ import (
	errorsV2 "github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/modules/preference"
	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
	"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/valuer"
@@ -549,6 +550,7 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) {
	router.HandleFunc("/api/v1/services/list", am.ViewAccess(aH.getServicesList)).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/service/top_operations", am.ViewAccess(aH.getTopOperations)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/service/top_level_operations", am.ViewAccess(aH.getServicesTopLevelOps)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(aH.SearchTraces)).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/usage", am.ViewAccess(aH.getUsage)).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/dependency_graph", am.ViewAccess(aH.dependencyGraph)).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/settings/ttl", am.AdminAccess(aH.setTTL)).Methods(http.MethodPost)
@@ -1724,6 +1726,22 @@ func (aH *APIHandler) getServicesList(w http.ResponseWriter, r *http.Request) {

}

func (aH *APIHandler) SearchTraces(w http.ResponseWriter, r *http.Request) {
	params, err := ParseSearchTracesParams(r)
	if err != nil {
		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
		return
	}

	result, err := aH.reader.SearchTraces(r.Context(), params)
	if aH.HandleError(w, err, http.StatusBadRequest) {
		return
	}

	aH.WriteJSON(w, r, result)

}

func (aH *APIHandler) GetWaterfallSpansForTraceWithMetadata(w http.ResponseWriter, r *http.Request) {
	traceID := mux.Vars(r)["traceId"]
	if traceID == "" {
@@ -3981,8 +3999,8 @@ func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
	ctx context.Context,
	cloudProvider string,
	cloudAccountId string,
	svcDetails *cloudintegrations.CloudServiceDetails,
) (*cloudintegrations.CloudServiceConnectionStatus, *model.ApiError) {
	svcDetails *cloudintegrations.ServiceDetails,
) (*cloudintegrations.ServiceConnectionStatus, *model.ApiError) {
	if cloudProvider != "aws" {
		// TODO(Raj): Make connection check generic for all providers in a follow up change
		return nil, model.BadRequest(
@@ -3990,14 +4008,14 @@ func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
		)
	}

	telemetryCollectionStrategy := svcDetails.TelemetryCollectionStrategy
	telemetryCollectionStrategy := svcDetails.Strategy
	if telemetryCollectionStrategy == nil {
		return nil, model.InternalError(fmt.Errorf(
			"service doesn't have telemetry collection strategy: %s", svcDetails.Id,
		))
	}

	result := &cloudintegrations.CloudServiceConnectionStatus{}
	result := &cloudintegrations.ServiceConnectionStatus{}
	errors := []*model.ApiError{}
	var resultLock sync.Mutex

@@ -4057,10 +4075,10 @@ func (aH *APIHandler) calculateCloudIntegrationServiceConnectionStatus(
func (aH *APIHandler) calculateAWSIntegrationSvcMetricsConnectionStatus(
	ctx context.Context,
	cloudAccountId string,
	strategy *cloudintegrations.AWSMetricsCollectionStrategy,
	metricsCollectedBySvc []cloudintegrations.CollectedMetric,
	strategy *services.AWSMetricsStrategy,
	metricsCollectedBySvc []services.CollectedMetric,
) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
	if strategy == nil || len(strategy.CloudwatchMetricsStreamFilters) < 1 {
	if strategy == nil || len(strategy.StreamFilters) < 1 {
		return nil, nil
	}

@@ -4069,7 +4087,7 @@ func (aH *APIHandler) calculateAWSIntegrationSvcMetricsConnectionStatus(
		"cloud_account_id": cloudAccountId,
	}

	metricsNamespace := strategy.CloudwatchMetricsStreamFilters[0].Namespace
	metricsNamespace := strategy.StreamFilters[0].Namespace
	metricsNamespaceParts := strings.Split(metricsNamespace, "/")

	if len(metricsNamespaceParts) >= 2 {
@@ -4106,13 +4124,13 @@ func (aH *APIHandler) calculateAWSIntegrationSvcMetricsConnectionStatus(
func (aH *APIHandler) calculateAWSIntegrationSvcLogsConnectionStatus(
	ctx context.Context,
	cloudAccountId string,
	strategy *cloudintegrations.AWSLogsCollectionStrategy,
	strategy *services.AWSLogsStrategy,
) (*cloudintegrations.SignalConnectionStatus, *model.ApiError) {
	if strategy == nil || len(strategy.CloudwatchLogsSubscriptions) < 1 {
	if strategy == nil || len(strategy.Subscriptions) < 1 {
		return nil, nil
	}

	logGroupNamePrefix := strategy.CloudwatchLogsSubscriptions[0].LogGroupNamePrefix
	logGroupNamePrefix := strategy.Subscriptions[0].LogGroupNamePrefix
	if len(logGroupNamePrefix) < 1 {
		return nil, nil
	}

@@ -1,4 +1,4 @@
package model
package smart

type SpanForTraceDetails struct {
	TimeUnixNano uint64 `json:"timestamp"`
@@ -15,8 +15,3 @@ type SpanForTraceDetails struct {
	HasError bool                   `json:"hasError"`
	Children []*SpanForTraceDetails `json:"children"`
}

type GetSpansSubQueryDBResponse struct {
	SpanID  string `ch:"spanID"`
	TraceID string `ch:"traceID"`
}
@@ -1,17 +1,16 @@
package db
package smart

import (
	"errors"
	"strconv"

	"github.com/SigNoz/signoz/ee/query-service/model"
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	"go.uber.org/zap"
)

// SmartTraceAlgorithm is an algorithm to find the target span and build a tree of spans around it with the given levelUp and levelDown parameters and the given spanLimit
func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]basemodel.SearchSpansResult, error) {
	var spans []*model.SpanForTraceDetails
	var spans []*SpanForTraceDetails

	// if targetSpanId is null or not present then randomly select a span as targetSpanId
	if (targetSpanId == "" || targetSpanId == "null") && len(payload) > 0 {
@@ -24,7 +23,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
		if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
			parentID = spanItem.References[0].SpanId
		}
		span := &model.SpanForTraceDetails{
		span := &SpanForTraceDetails{
			TimeUnixNano: spanItem.TimeUnixNano,
			SpanID:       spanItem.SpanID,
			TraceID:      spanItem.TraceID,
@@ -45,7 +44,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
	if err != nil {
		return nil, err
	}
	targetSpan := &model.SpanForTraceDetails{}
	targetSpan := &SpanForTraceDetails{}

	// Find the target span in the span trees
	for _, root := range roots {
@@ -65,7 +64,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
	}

	// Build the final result
	parents := []*model.SpanForTraceDetails{}
	parents := []*SpanForTraceDetails{}

	// Get the parent spans of the target span up to the given levelUp parameter and spanLimit
	preParent := targetSpan
@@ -90,11 +89,11 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
	}

	// Get the child spans of the target span until the given levelDown and spanLimit
	preParents := []*model.SpanForTraceDetails{targetSpan}
	children := []*model.SpanForTraceDetails{}
	preParents := []*SpanForTraceDetails{targetSpan}
	children := []*SpanForTraceDetails{}

	for i := 0; i < levelDown && len(preParents) != 0 && spanLimit > 0; i++ {
		parents := []*model.SpanForTraceDetails{}
		parents := []*SpanForTraceDetails{}
		for _, parent := range preParents {
			if spanLimit-len(parent.Children) <= 0 {
				children = append(children, parent.Children[:spanLimit]...)
@@ -108,7 +107,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
	}

	// Store the final list of spans in the resultSpanSet map to avoid duplicates
	resultSpansSet := make(map[*model.SpanForTraceDetails]struct{})
	resultSpansSet := make(map[*SpanForTraceDetails]struct{})
	resultSpansSet[targetSpan] = struct{}{}
	for _, parent := range parents {
		resultSpansSet[parent] = struct{}{}
@@ -169,12 +168,12 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
}

// buildSpanTrees builds trees of spans from a list of spans.
func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTraceDetails, error) {
func buildSpanTrees(spansPtr *[]*SpanForTraceDetails) ([]*SpanForTraceDetails, error) {

	// Build a map of spanID to span for fast lookup
	var roots []*model.SpanForTraceDetails
	var roots []*SpanForTraceDetails
	spans := *spansPtr
	mapOfSpans := make(map[string]*model.SpanForTraceDetails, len(spans))
	mapOfSpans := make(map[string]*SpanForTraceDetails, len(spans))

	for _, span := range spans {
		if span.ParentID == "" {
@@ -206,8 +205,8 @@ func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTra
}

// breadthFirstSearch performs a breadth-first search on the span tree to find the target span.
func breadthFirstSearch(spansPtr *model.SpanForTraceDetails, targetId string) (*model.SpanForTraceDetails, error) {
	queue := []*model.SpanForTraceDetails{spansPtr}
func breadthFirstSearch(spansPtr *SpanForTraceDetails, targetId string) (*SpanForTraceDetails, error) {
	queue := []*SpanForTraceDetails{spansPtr}
	visited := make(map[string]bool)

	for len(queue) > 0 {
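
A sketch of invoking the relocated algorithm from its new package; the payload comes from a trace search result, and the numeric arguments are illustrative:

	// Build a tree around span "abc123", keeping up to 2 levels of
	// parents and 2 levels of children, capped at 100 spans overall.
	spans, err := smart.SmartTraceAlgorithm(payload, "abc123", 2, 2, 100)
	if err != nil {
		return err
	}
	_ = spans // []basemodel.SearchSpansResult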

@@ -68,10 +68,6 @@ func UseMetricsPreAggregation() bool {
	return GetOrDefaultEnv("USE_METRICS_PRE_AGGREGATION", "true") == "true"
}

func EnableHostsInfraMonitoring() bool {
	return GetOrDefaultEnv("ENABLE_INFRA_METRICS", "true") == "true"
}

var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "false")

var DEFAULT_FEATURE_SET = model.FeatureSet{

@@ -32,13 +32,7 @@ func (fm *FeatureManager) CheckFeature(featureKey string) error {

// GetFeatureFlags returns current features
func (fm *FeatureManager) GetFeatureFlags() (model.FeatureSet, error) {
	features := append(constants.DEFAULT_FEATURE_SET, model.Feature{
		Name:       model.OSS,
		Active:     true,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	})
	features := constants.DEFAULT_FEATURE_SET
	return features, nil
}

@@ -39,6 +39,7 @@ type Reader interface {
	GetNextPrevErrorIDs(ctx context.Context, params *model.GetErrorParams) (*model.NextPrevErrorIDs, *model.ApiError)

	// Search Interfaces
	SearchTraces(ctx context.Context, params *model.SearchTracesParams) (*[]model.SearchSpansResult, error)
	GetWaterfallSpansForTraceWithMetadata(ctx context.Context, traceID string, req *model.GetWaterfallSpansForTraceWithMetadataParams) (*model.GetWaterfallSpansForTraceWithMetadataResponse, *model.ApiError)
	GetFlamegraphSpansForTrace(ctx context.Context, traceID string, req *model.GetFlamegraphSpansForTraceParams) (*model.GetFlamegraphSpansForTraceResponse, *model.ApiError)

@@ -9,58 +9,11 @@ type Feature struct {
	Route string `db:"route" json:"route"`
}

const CustomMetricsFunction = "CUSTOM_METRICS_FUNCTION"
const DisableUpsell = "DISABLE_UPSELL"
const OSS = "OSS"
const QueryBuilderPanels = "QUERY_BUILDER_PANELS"
const QueryBuilderAlerts = "QUERY_BUILDER_ALERTS"
const UseSpanMetrics = "USE_SPAN_METRICS"
const AlertChannelSlack = "ALERT_CHANNEL_SLACK"
const AlertChannelWebhook = "ALERT_CHANNEL_WEBHOOK"
const AlertChannelPagerduty = "ALERT_CHANNEL_PAGERDUTY"
const AlertChannelMsTeams = "ALERT_CHANNEL_MSTEAMS"
const AlertChannelOpsgenie = "ALERT_CHANNEL_OPSGENIE"
const AlertChannelEmail = "ALERT_CHANNEL_EMAIL"
const AnomalyDetection = "ANOMALY_DETECTION"
const HostsInfraMonitoring = "HOSTS_INFRA_MONITORING"
const TraceFunnels = "TRACE_FUNNELS"

var BasicPlan = FeatureSet{
	Feature{
		Name:       OSS,
		Active:     true,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       DisableUpsell,
		Active:     false,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       CustomMetricsFunction,
		Active:     false,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       QueryBuilderPanels,
		Active:     true,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       QueryBuilderAlerts,
		Active:     true,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:   UseSpanMetrics,
		Active: false,
@@ -68,48 +21,6 @@ var BasicPlan = FeatureSet{
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       AlertChannelSlack,
		Active:     true,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       AlertChannelWebhook,
		Active:     true,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       AlertChannelPagerduty,
		Active:     true,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       AlertChannelOpsgenie,
		Active:     true,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       AlertChannelEmail,
		Active:     true,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:       AlertChannelMsTeams,
		Active:     false,
		Usage:      0,
		UsageLimit: -1,
		Route:      "",
	},
	Feature{
		Name:   AnomalyDetection,
		Active: false,

@@ -51,6 +51,7 @@ func NewTestSqliteDB(t *testing.T) (sqlStore sqlstore.SQLStore, testDBFilePath s
			sqlmigration.NewUpdateDashboardAndSavedViewsFactory(sqlStore),
			sqlmigration.NewUpdatePatAndOrgDomainsFactory(sqlStore),
			sqlmigration.NewUpdatePipelines(sqlStore),
			sqlmigration.NewAddVirtualFieldsFactory(),
		),
	)
	if err != nil {

@@ -69,6 +69,7 @@ func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedM
		sqlmigration.NewUpdatePreferencesFactory(sqlstore),
		sqlmigration.NewUpdateApdexTtlFactory(sqlstore),
		sqlmigration.NewUpdateResetPasswordFactory(sqlstore),
		sqlmigration.NewAddVirtualFieldsFactory(),
	)
}

58
pkg/sqlmigration/025_add_virtual_fields.go
Normal file
@@ -0,0 +1,58 @@
package sqlmigration

import (
	"context"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

type addVirtualFields struct{}

func NewAddVirtualFieldsFactory() factory.ProviderFactory[SQLMigration, Config] {
	return factory.NewProviderFactory(factory.MustNewName("add_virtual_fields"), newAddVirtualFields)
}

func newAddVirtualFields(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
	return &addVirtualFields{}, nil
}

func (migration *addVirtualFields) Register(migrations *migrate.Migrations) error {
	if err := migrations.Register(migration.Up, migration.Down); err != nil {
		return err
	}

	return nil
}

func (migration *addVirtualFields) Up(ctx context.Context, db *bun.DB) error {
	// table:virtual_field op:create
	if _, err := db.NewCreateTable().
		Model(&struct {
			bun.BaseModel `bun:"table:virtual_field"`

			types.Identifiable
			types.TimeAuditable
			types.UserAuditable

			Name        string                `bun:"name,type:text,notnull"`
			Expression  string                `bun:"expression,type:text,notnull"`
			Description string                `bun:"description,type:text"`
			Signal      telemetrytypes.Signal `bun:"signal,type:text,notnull"`
			OrgID       string                `bun:"org_id,type:text,notnull"`
		}{}).
		ForeignKey(`("org_id") REFERENCES "organizations" ("id") ON DELETE CASCADE`).
		IfNotExists().
		Exec(ctx); err != nil {
		return err
	}

	return nil
}

func (migration *addVirtualFields) Down(ctx context.Context, db *bun.DB) error {
	return nil
}
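
Down is intentionally a no-op here. For completeness, a hypothetical rollback (not part of this change) could drop the table with a plain statement:

	// Hypothetical sketch only; the migration above leaves Down empty.
	func (migration *addVirtualFields) Down(ctx context.Context, db *bun.DB) error {
		_, err := db.ExecContext(ctx, `DROP TABLE IF EXISTS virtual_field`)
		return err
	}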
275
pkg/telemetrylogs/condition_builder.go
Normal file
@@ -0,0 +1,275 @@

package telemetrylogs

import (
	"context"
	"fmt"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"

	"github.com/huandu/go-sqlbuilder"
)

var (
	logsV2Columns = map[string]*schema.Column{
		"ts_bucket_start":      {Name: "ts_bucket_start", Type: schema.ColumnTypeUInt64},
		"resource_fingerprint": {Name: "resource_fingerprint", Type: schema.ColumnTypeString},

		"timestamp":          {Name: "timestamp", Type: schema.ColumnTypeUInt64},
		"observed_timestamp": {Name: "observed_timestamp", Type: schema.ColumnTypeUInt64},
		"id":                 {Name: "id", Type: schema.ColumnTypeString},
		"trace_id":           {Name: "trace_id", Type: schema.ColumnTypeString},
		"span_id":            {Name: "span_id", Type: schema.ColumnTypeString},
		"trace_flags":        {Name: "trace_flags", Type: schema.ColumnTypeUInt32},
		"severity_text":      {Name: "severity_text", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"severity_number":    {Name: "severity_number", Type: schema.ColumnTypeUInt8},
		"body":               {Name: "body", Type: schema.ColumnTypeString},
		"attributes_string": {Name: "attributes_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes_number": {Name: "attributes_number", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}},
		"attributes_bool": {Name: "attributes_bool", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}},
		"resources_string": {Name: "resources_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"scope_name":    {Name: "scope_name", Type: schema.ColumnTypeString},
		"scope_version": {Name: "scope_version", Type: schema.ColumnTypeString},
		"scope_string": {Name: "scope_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
	}
)

var _ qbtypes.ConditionBuilder = &conditionBuilder{}

type conditionBuilder struct {
}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {

	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return logsV2Columns["resources_string"], nil
	case telemetrytypes.FieldContextScope:
		switch key.Name {
		case "name", "scope.name", "scope_name":
			return logsV2Columns["scope_name"], nil
		case "version", "scope.version", "scope_version":
			return logsV2Columns["scope_version"], nil
		}
		return logsV2Columns["scope_string"], nil
	case telemetrytypes.FieldContextAttribute:
		switch key.FieldDataType {
		case telemetrytypes.FieldDataTypeString:
			return logsV2Columns["attributes_string"], nil
		case telemetrytypes.FieldDataTypeInt64, telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeNumber:
			return logsV2Columns["attributes_number"], nil
		case telemetrytypes.FieldDataTypeBool:
			return logsV2Columns["attributes_bool"], nil
		}
	case telemetrytypes.FieldContextLog:
		col, ok := logsV2Columns[key.Name]
		if !ok {
			return nil, qbtypes.ErrColumnNotFound
		}
		return col, nil
	}

	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.ColumnTypeString,
		schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		schema.ColumnTypeUInt64,
		schema.ColumnTypeUInt32,
		schema.ColumnTypeUInt8:
		return column.Name, nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}:
		// a key could have been materialized, if so return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeFloat64,
	}:
		// a key could have been materialized, if so return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeBool,
	}:
		// a key could have been materialized, if so return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	// should not reach here
	return column.Name, nil
}

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName)

	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.E(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.NE(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThan:
		return sb.G(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThanOrEq:
		return sb.GE(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThan:
		return sb.LT(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThanOrEq:
		return sb.LE(tblFieldName, value), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.Like(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.NotLike(tblFieldName, value), nil
	case qbtypes.FilterOperatorILike:
		return sb.ILike(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.NotILike(tblFieldName, value), nil

	case qbtypes.FilterOperatorContains:
		return sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil

	// between and not between
	case qbtypes.FilterOperatorBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.Between(tblFieldName, values[0], values[1]), nil
	case qbtypes.FilterOperatorNotBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.NotBetween(tblFieldName, values[0], values[1]), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.In(tblFieldName, values...), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.NotIn(tblFieldName, values...), nil

	// exists and not exists
	// but how could you live and have no story to tell
	// in the UI based query builder, `exists` and `not exists` are used for
	// key membership checks, so depending on the column type, the condition changes
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		var value any
		switch column.Type {
		case schema.ColumnTypeString, schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}:
			value = ""
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.ColumnTypeUInt64, schema.ColumnTypeUInt32, schema.ColumnTypeUInt8:
			value = 0
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if key.Materialized {
				leftOperand = telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
			}
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			} else {
				return sb.NE(leftOperand, true), nil
			}
		default:
			return "", fmt.Errorf("exists operator is not supported for column type %s", column.Type)
		}
	}
	return "", fmt.Errorf("unsupported operator: %v", operator)
}
|
||||
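For orientation, a minimal usage sketch of the condition builder above (illustrative only, not part of this diff; the attribute name and filter value are hypothetical, and the package import path is assumed from the file layout):

package main

import (
	"context"
	"fmt"

	"github.com/SigNoz/signoz/pkg/telemetrylogs"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

func main() {
	cb := telemetrylogs.NewConditionBuilder()
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("body").From("signoz_logs.distributed_logs_v2")

	// hypothetical string attribute; renders as attributes_string['user.id'] = ?
	key := &telemetrytypes.TelemetryFieldKey{
		Name:          "user.id",
		FieldContext:  telemetrytypes.FieldContextAttribute,
		FieldDataType: telemetrytypes.FieldDataTypeString,
	}
	cond, err := cb.GetCondition(context.Background(), key, qbtypes.FilterOperatorEqual, "u-42", sb)
	if err != nil {
		panic(err)
	}
	sb.Where(cond)

	sql, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	fmt.Println(sql, args) // ... WHERE attributes_string['user.id'] = ? ["u-42"]
}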
620
pkg/telemetrylogs/condition_builder_test.go
Normal file
@@ -0,0 +1,620 @@
package telemetrylogs

import (
	"context"
	"testing"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetColumn(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		expectedCol   *schema.Column
		expectedError error
	}{
		{
			name: "Resource field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedCol:   logsV2Columns["resources_string"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope.name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope.name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope_name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope_name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - version",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "version",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_version"],
			expectedError: nil,
		},
		{
			name: "Scope field - other scope field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "custom.scope.field",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_string"],
			expectedError: nil,
		},
		{
			name: "Attribute field - string type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedCol:   logsV2Columns["attributes_string"],
			expectedError: nil,
		},
		{
			name: "Attribute field - number type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - int64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeInt64,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - float64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "cpu.utilization",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - bool type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedCol:   logsV2Columns["attributes_bool"],
			expectedError: nil,
		},
		{
			name: "Log field - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   logsV2Columns["timestamp"],
			expectedError: nil,
		},
		{
			name: "Log field - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   logsV2Columns["body"],
			expectedError: nil,
		},
		{
			name: "Log field - nonexistent",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "did_user_login",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "did_user_login",
				Signal:        telemetrytypes.SignalLogs,
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedCol:   logsV2Columns["attributes_bool"],
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			col, err := conditionBuilder.GetColumn(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedCol, col)
			}
		})
	}
}

func TestGetFieldKeyName(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := &conditionBuilder{}

	testCases := []struct {
		name           string
		key            telemetrytypes.TelemetryFieldKey
		expectedResult string
		expectedError  error
	}{
		{
			name: "Simple column type - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "timestamp",
			expectedError:  nil,
		},
		{
			name: "Map column type - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedResult: "attributes_string['user.id']",
			expectedError:  nil,
		},
		{
			name: "Map column type - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedResult: "attributes_number['request.size']",
			expectedError:  nil,
		},
		{
			name: "Map column type - bool attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedResult: "attributes_bool['request.success']",
			expectedError:  nil,
		},
		{
			name: "Map column type - resource attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedResult: "resources_string['service.name']",
			expectedError:  nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "",
			expectedError:  qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedResult, result)
			}
		})
	}
}

func TestGetCondition(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Equal operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "error message",
			expectedSQL:   "body = ?",
			expectedError: nil,
		},
		{
			name: "Not Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotEqual,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <> ?",
			expectedError: nil,
		},
		{
			name: "Greater Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorGreaterThan,
			value:         float64(100),
			expectedSQL:   "attributes_number['request.duration'] > ?",
			expectedError: nil,
		},
		{
			name: "Less Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorLessThan,
			value:         float64(1024),
			expectedSQL:   "attributes_number['request.size'] < ?",
			expectedError: nil,
		},
		{
			name: "Greater Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorGreaterThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp >= ?",
			expectedError: nil,
		},
		{
			name: "Less Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorLessThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <= ?",
			expectedError: nil,
		},
		{
			name: "Like operator - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorLike,
			value:         "%error%",
			expectedSQL:   "body LIKE ?",
			expectedError: nil,
		},
		{
			name: "Not Like operator - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotLike,
			value:         "%error%",
			expectedSQL:   "body NOT LIKE ?",
			expectedError: nil,
		},
		{
			name: "ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Not ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) NOT LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Contains operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorContains,
			value:         "admin",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "Between operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         "invalid",
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Between operator - insufficient values",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000)},
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Not Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp NOT BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "In operator - severity_text",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         []any{"error", "fatal", "critical"},
			expectedSQL:   "severity_text IN (?, ?, ?)",
			expectedError: nil,
		},
		{
			name: "In operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         "error",
			expectedSQL:   "",
			expectedError: qbtypes.ErrInValues,
		},
		{
			name: "Not In operator - severity_text",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotIn,
			value:         []any{"debug", "info", "trace"},
			expectedSQL:   "severity_text NOT IN (?, ?, ?)",
			expectedError: nil,
		},
		{
			name: "Exists operator - string field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "body <> ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - string field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "body = ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - number field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "timestamp <> ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') = ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') <> ?",
			expectedError: nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "value",
			expectedSQL:   "",
			expectedError: qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}

func TestGetConditionMultiple(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		keys          []*telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Equal operator - string",
			keys: []*telemetrytypes.TelemetryFieldKey{
				{
					Name:         "body",
					FieldContext: telemetrytypes.FieldContextLog,
				},
				{
					Name:         "severity_text",
					FieldContext: telemetrytypes.FieldContextLog,
				},
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "error message",
			expectedSQL:   "body = ? AND severity_text = ?",
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			var err error
			for _, key := range tc.keys {
				// assign with `=` (not `:=`) so the error remains visible
				// outside the loop for the assertions below
				var cond string
				cond, err = conditionBuilder.GetCondition(ctx, key, tc.operator, tc.value, sb)
				if err != nil {
					t.Fatalf("Error getting condition for key %s: %v", key.Name, err)
				}
				sb.Where(cond)
			}

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
9
pkg/telemetrylogs/tables.go
Normal file
@@ -0,0 +1,9 @@
package telemetrylogs

const (
	DBName                        = "signoz_logs"
	LogsV2TableName               = "distributed_logs_v2"
	LogsV2LocalTableName          = "logs_v2"
	TagAttributesV2TableName      = "distributed_tag_attributes_v2"
	TagAttributesV2LocalTableName = "tag_attributes_v2"
)
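These constants are joined into the fully qualified "db.table" identifiers that the queries elsewhere in this diff use; a tiny illustrative helper (hypothetical, not part of this diff):

// FullTableName renders the "db.table" form used in the ClickHouse queries,
// e.g. FullTableName(DBName, LogsV2TableName) == "signoz_logs.distributed_logs_v2".
func FullTableName(db, table string) string {
	return db + "." + table
}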
149
pkg/telemetrymetadata/condition_builder.go
Normal file
@@ -0,0 +1,149 @@
package telemetrymetadata

import (
	"context"
	"fmt"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

var (
	attributeMetadataColumns = map[string]*schema.Column{
		"resource_attributes": {Name: "resource_attributes", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes": {Name: "attributes", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
	}
)

type conditionBuilder struct{}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {
	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return attributeMetadataColumns["resource_attributes"], nil
	case telemetrytypes.FieldContextAttribute:
		return attributeMetadataColumns["attributes"], nil
	}
	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}:
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	return column.Name, nil
}

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		// if we don't have a column, we can't build a condition for related values
		return "", nil
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		// if we don't have a table field name, we can't build a condition for related values
		return "", nil
	}

	if key.FieldDataType != telemetrytypes.FieldDataTypeString {
		// if the field data type is not string, we can't build a condition for related values
		return "", nil
	}

	// the key must exist for the main filter to apply
	containsExp := fmt.Sprintf("mapContains(%s, %s)", column.Name, sb.Var(key.Name))

	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.And(containsExp, sb.E(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.And(containsExp, sb.NE(tblFieldName, value)), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.And(containsExp, sb.Like(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.And(containsExp, sb.NotLike(tblFieldName, value)), nil
	case qbtypes.FilterOperatorILike:
		return sb.And(containsExp, sb.ILike(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.And(containsExp, sb.NotILike(tblFieldName, value)), nil

	case qbtypes.FilterOperatorContains:
		return sb.And(containsExp, sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value))), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.And(containsExp, sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value))), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(containsExp, exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(containsExp, exp), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.And(containsExp, sb.In(tblFieldName, values...)), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.And(containsExp, sb.NotIn(tblFieldName, values...)), nil

	// exists and not exists
	// in the query builder, `exists` and `not exists` are used for
	// key membership checks, so the condition changes depending on the column type
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		switch column.Type {
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			} else {
				return sb.NE(leftOperand, true), nil
			}
		}
	}

	return "", nil
}
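For context, a sketch of the guarded condition this builder emits (illustrative only; the attribute name, value, and target table are hypothetical placeholders):

package telemetrymetadata

import (
	"context"

	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

// exampleRelatedValueCondition shows the guarded condition GetCondition
// emits for a string resource attribute. Illustrative only.
func exampleRelatedValueCondition(ctx context.Context) (string, []interface{}) {
	sb := sqlbuilder.NewSelectBuilder()
	cb := NewConditionBuilder()
	key := &telemetrytypes.TelemetryFieldKey{
		Name:          "service.name", // hypothetical attribute
		FieldContext:  telemetrytypes.FieldContextResource,
		FieldDataType: telemetrytypes.FieldDataTypeString,
	}
	cond, _ := cb.GetCondition(ctx, key, qbtypes.FilterOperatorEqual, "checkout", sb)
	// "related_metadata" is a placeholder table name
	sb.Select("*").From("related_metadata").Where(cond)
	// builds roughly:
	//   SELECT * FROM related_metadata
	//   WHERE (mapContains(resource_attributes, ?) AND resource_attributes['service.name'] = ?)
	return sb.BuildWithFlavor(sqlbuilder.ClickHouse)
}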
272
pkg/telemetrymetadata/condition_builder_test.go
Normal file
@@ -0,0 +1,272 @@
package telemetrymetadata

import (
	"context"
	"testing"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetColumn(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		expectedCol   *schema.Column
		expectedError error
	}{
		{
			name: "Resource field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedCol:   attributeMetadataColumns["resource_attributes"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - scope.name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope.name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - scope_name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope_name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - version",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "version",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - other scope field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "custom.scope.field",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Attribute field - string type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - number type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - int64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeInt64,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - float64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "cpu.utilization",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Log field - nonexistent",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			col, err := conditionBuilder.GetColumn(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedCol, col)
			}
		})
	}
}

func TestGetFieldKeyName(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := &conditionBuilder{}

	testCases := []struct {
		name           string
		key            telemetrytypes.TelemetryFieldKey
		expectedResult string
		expectedError  error
	}{
		{
			name: "Map column type - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedResult: "attributes['user.id']",
			expectedError:  nil,
		},
		{
			name: "Map column type - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedResult: "attributes['request.size']",
			expectedError:  nil,
		},
		{
			name: "Map column type - bool attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedResult: "attributes['request.success']",
			expectedError:  nil,
		},
		{
			name: "Map column type - resource attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedResult: "resource_attributes['service.name']",
			expectedError:  nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "",
			expectedError:  qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedResult, result)
			}
		})
	}
}

func TestGetCondition(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorILike,
			value:         "%admin%",
			expectedSQL:   "WHERE (mapContains(attributes, ?) AND LOWER(attributes['user.id']) LIKE LOWER(?))",
			expectedError: nil,
		},
		{
			name: "Not ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotILike,
			value:         "%admin%",
			expectedSQL:   "WHERE (mapContains(attributes, ?) AND LOWER(attributes['user.id']) NOT LIKE LOWER(?))",
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
691
pkg/telemetrymetadata/metadata.go
Normal file
@@ -0,0 +1,691 @@
package telemetrymetadata

import (
	"context"
	"fmt"
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"go.uber.org/zap"
)

var (
	ErrFailedToGetTracesKeys    = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get traces keys")
	ErrFailedToGetLogsKeys      = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get logs keys")
	ErrFailedToGetTblStatement  = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get tbl statement")
	ErrFailedToGetMetricsKeys   = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get metrics keys")
	ErrFailedToGetRelatedValues = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get related values")
)

type telemetryMetaStore struct {
	telemetrystore         telemetrystore.TelemetryStore
	tracesDBName           string
	tracesFieldsTblName    string
	indexV3TblName         string
	metricsDBName          string
	metricsFieldsTblName   string
	timeseries1WTblName    string
	logsDBName             string
	logsFieldsTblName      string
	logsV2TblName          string
	relatedMetadataDBName  string
	relatedMetadataTblName string

	conditionBuilder qbtypes.ConditionBuilder
}

func NewTelemetryMetaStore(
	telemetrystore telemetrystore.TelemetryStore,
	tracesDBName string,
	tracesFieldsTblName string,
	indexV3TblName string,
	metricsDBName string,
	metricsFieldsTblName string,
	timeseries1WTblName string,
	logsDBName string,
	logsV2TblName string,
	logsFieldsTblName string,
	relatedMetadataDBName string,
	relatedMetadataTblName string,
) (telemetrytypes.MetadataStore, error) {
	return &telemetryMetaStore{
		telemetrystore:         telemetrystore,
		tracesDBName:           tracesDBName,
		tracesFieldsTblName:    tracesFieldsTblName,
		indexV3TblName:         indexV3TblName,
		metricsDBName:          metricsDBName,
		metricsFieldsTblName:   metricsFieldsTblName,
		timeseries1WTblName:    timeseries1WTblName,
		logsDBName:             logsDBName,
		logsV2TblName:          logsV2TblName,
		logsFieldsTblName:      logsFieldsTblName,
		relatedMetadataDBName:  relatedMetadataDBName,
		relatedMetadataTblName: relatedMetadataTblName,

		conditionBuilder: NewConditionBuilder(),
	}, nil
}
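A wiring sketch for the constructor (illustrative only; the traces, metrics, and related-metadata names below are hypothetical placeholders — only the logs names come from pkg/telemetrylogs/tables.go in this diff):

package telemetrymetadata

import (
	"github.com/SigNoz/signoz/pkg/telemetrylogs"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// exampleNewMetaStore is illustrative wiring only; store is assumed to be a
// configured telemetrystore.TelemetryStore.
func exampleNewMetaStore(store telemetrystore.TelemetryStore) (telemetrytypes.MetadataStore, error) {
	return NewTelemetryMetaStore(
		store,
		"signoz_traces",                    // tracesDBName (placeholder)
		"distributed_tag_attributes_v2",    // tracesFieldsTblName (placeholder)
		"distributed_signoz_index_v3",      // indexV3TblName (placeholder)
		"signoz_metrics",                   // metricsDBName (placeholder)
		"distributed_time_series_v4",       // metricsFieldsTblName (placeholder)
		"distributed_time_series_v4_1week", // timeseries1WTblName (placeholder)
		telemetrylogs.DBName,
		telemetrylogs.LogsV2TableName,
		telemetrylogs.TagAttributesV2TableName,
		"signoz_metadata",                 // relatedMetadataDBName (placeholder)
		"distributed_attributes_metadata", // relatedMetadataTblName (placeholder)
	)
}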
// tracesTblStatementToFieldKeys returns the materialised attribute/resource/scope keys from the traces table
func (t *telemetryMetaStore) tracesTblStatementToFieldKeys(ctx context.Context) ([]*telemetrytypes.TelemetryFieldKey, error) {
	query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", t.tracesDBName, t.indexV3TblName)
	statements := []telemetrytypes.ShowCreateTableStatement{}
	err := t.telemetrystore.ClickhouseDB().Select(ctx, &statements, query)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTblStatement.Error())
	}

	return ExtractFieldKeysFromTblStatement(statements[0].Statement)
}

// getTracesKeys returns the keys from the spans that match the field selection criteria
func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	if len(fieldKeySelectors) == 0 {
		return nil, nil
	}

	// pre-fetch the materialised keys from the traces table
	matKeys, err := t.tracesTblStatementToFieldKeys(ctx)
	if err != nil {
		return nil, err
	}
	mapOfKeys := make(map[string]*telemetrytypes.TelemetryFieldKey)
	for _, key := range matKeys {
		mapOfKeys[key.Name+";"+key.FieldContext.StringValue()+";"+key.FieldDataType.StringValue()] = key
	}

	sb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", `
		CASE
			WHEN tag_type = 'spanfield' THEN 1
			WHEN tag_type = 'resource' THEN 2
			WHEN tag_type = 'scope' THEN 3
			WHEN tag_type = 'tag' THEN 4
			ELSE 5
		END as priority`).From(t.tracesDBName + "." + t.tracesFieldsTblName)
	var limit int

	conds := []string{}
	for _, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.StartUnixMilli != 0 {
			conds = append(conds, sb.GE("unix_milli", fieldKeySelector.StartUnixMilli))
		}
		if fieldKeySelector.EndUnixMilli != 0 {
			conds = append(conds, sb.LE("unix_milli", fieldKeySelector.EndUnixMilli))
		}

		// key part of the selector
		fieldKeyConds := []string{}
		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_key", fieldKeySelector.Name))
		} else {
			fieldKeyConds = append(fieldKeyConds, sb.Like("tag_key", "%"+fieldKeySelector.Name+"%"))
		}

		// now look at the field context
		if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_type", fieldKeySelector.FieldContext.TagType()))
		}

		// now look at the field data type
		if fieldKeySelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_data_type", fieldKeySelector.FieldDataType.TagDataType()))
		}

		conds = append(conds, sb.And(fieldKeyConds...))
		limit += fieldKeySelector.Limit
	}
	sb.Where(sb.Or(conds...))

	if limit == 0 {
		limit = 1000
	}

	mainSb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", "max(priority) as priority")
	mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
	mainSb.GroupBy("tag_key", "tag_type", "tag_data_type")
	mainSb.OrderBy("priority")
	mainSb.Limit(limit)

	query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
	}
	defer rows.Close()
	keys := []*telemetrytypes.TelemetryFieldKey{}
	for rows.Next() {
		var name string
		var fieldContext telemetrytypes.FieldContext
		var fieldDataType telemetrytypes.FieldDataType
		var priority uint8
		err = rows.Scan(&name, &fieldContext, &fieldDataType, &priority)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
		}
		key, ok := mapOfKeys[name+";"+fieldContext.StringValue()+";"+fieldDataType.StringValue()]

		// if there is no materialised column, create a key with the field context and data type
		if !ok {
			key = &telemetrytypes.TelemetryFieldKey{
				Name:          name,
				FieldContext:  fieldContext,
				FieldDataType: fieldDataType,
			}
		}

		keys = append(keys, key)
	}

	if rows.Err() != nil {
		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
	}

	return keys, nil
}
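Reconstructed from the builder calls above, the query getTracesKeys assembles has roughly this shape (the table name here is a placeholder; values are bound as ? placeholders):

    SELECT tag_key, tag_type, tag_data_type, max(priority) AS priority
    FROM (
        SELECT tag_key, tag_type, tag_data_type,
               CASE WHEN tag_type = 'spanfield' THEN 1
                    WHEN tag_type = 'resource'  THEN 2
                    WHEN tag_type = 'scope'     THEN 3
                    WHEN tag_type = 'tag'       THEN 4
                    ELSE 5 END AS priority
        FROM signoz_traces.distributed_tag_attributes_v2
        WHERE (unix_milli >= ? AND unix_milli <= ? AND (tag_key = ? ...))
    ) AS sub_query
    GROUP BY tag_key, tag_type, tag_data_type
    ORDER BY priority
    LIMIT ?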
// logsTblStatementToFieldKeys returns the materialised attribute/resource/scope keys from the logs table
func (t *telemetryMetaStore) logsTblStatementToFieldKeys(ctx context.Context) ([]*telemetrytypes.TelemetryFieldKey, error) {
	query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", t.logsDBName, t.logsV2TblName)
	statements := []telemetrytypes.ShowCreateTableStatement{}
	err := t.telemetrystore.ClickhouseDB().Select(ctx, &statements, query)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTblStatement.Error())
	}

	return ExtractFieldKeysFromTblStatement(statements[0].Statement)
}

// getLogsKeys returns the keys from the logs that match the field selection criteria
func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	if len(fieldKeySelectors) == 0 {
		return nil, nil
	}

	// pre-fetch the materialised keys from the logs table
	matKeys, err := t.logsTblStatementToFieldKeys(ctx)
	if err != nil {
		return nil, err
	}
	mapOfKeys := make(map[string]*telemetrytypes.TelemetryFieldKey)
	for _, key := range matKeys {
		mapOfKeys[key.Name+";"+key.FieldContext.StringValue()+";"+key.FieldDataType.StringValue()] = key
	}

	sb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", `
		CASE
			WHEN tag_type = 'logfield' THEN 1
			WHEN tag_type = 'resource' THEN 2
			WHEN tag_type = 'scope' THEN 3
			WHEN tag_type = 'tag' THEN 4
			ELSE 5
		END as priority`).From(t.logsDBName + "." + t.logsFieldsTblName)
	var limit int

	conds := []string{}
	for _, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.StartUnixMilli != 0 {
			conds = append(conds, sb.GE("unix_milli", fieldKeySelector.StartUnixMilli))
		}
		if fieldKeySelector.EndUnixMilli != 0 {
			conds = append(conds, sb.LE("unix_milli", fieldKeySelector.EndUnixMilli))
		}

		// key part of the selector
		fieldKeyConds := []string{}
		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_key", fieldKeySelector.Name))
		} else {
			fieldKeyConds = append(fieldKeyConds, sb.Like("tag_key", "%"+fieldKeySelector.Name+"%"))
		}

		// now look at the field context
		if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_type", fieldKeySelector.FieldContext.TagType()))
		}

		// now look at the field data type
		if fieldKeySelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_data_type", fieldKeySelector.FieldDataType.TagDataType()))
		}

		conds = append(conds, sb.And(fieldKeyConds...))
		limit += fieldKeySelector.Limit
	}
	sb.Where(sb.Or(conds...))
	if limit == 0 {
		limit = 1000
	}

	mainSb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", "max(priority) as priority")
	mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
	mainSb.GroupBy("tag_key", "tag_type", "tag_data_type")
	mainSb.OrderBy("priority")
	mainSb.Limit(limit)

	query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}
	defer rows.Close()
	keys := []*telemetrytypes.TelemetryFieldKey{}
	for rows.Next() {
		var name string
		var fieldContext telemetrytypes.FieldContext
		var fieldDataType telemetrytypes.FieldDataType
		var priority uint8
		err = rows.Scan(&name, &fieldContext, &fieldDataType, &priority)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
		}
		key, ok := mapOfKeys[name+";"+fieldContext.StringValue()+";"+fieldDataType.StringValue()]

		// if there is no materialised column, create a key with the field context and data type
		if !ok {
			key = &telemetrytypes.TelemetryFieldKey{
				Name:          name,
				FieldContext:  fieldContext,
				FieldDataType: fieldDataType,
			}
		}

		keys = append(keys, key)
	}

	if rows.Err() != nil {
		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}

	return keys, nil
}

// getMetricsKeys returns the keys from the metrics that match the field selection criteria
// TODO(srikanthccv): update the implementation after the dot metrics migration is done
func (t *telemetryMetaStore) getMetricsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	if len(fieldKeySelectors) == 0 {
		return nil, nil
	}

	var whereClause, innerWhereClause string
	var limit int
	args := []any{}

	for _, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.MetricContext != nil {
			// note the trailing space so that consecutive clauses join cleanly
			innerWhereClause += "metric_name IN ? AND "
			args = append(args, fieldKeySelector.MetricContext.MetricName)
		}
	}
	innerWhereClause += "__normalized = true"

	for idx, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			whereClause += "(distinctTagKey = ? AND distinctTagKey NOT LIKE '\\_\\_%%')"
			args = append(args, fieldKeySelector.Name)
		} else {
			whereClause += "(distinctTagKey ILIKE ? AND distinctTagKey NOT LIKE '\\_\\_%%')"
			args = append(args, fmt.Sprintf("%%%s%%", fieldKeySelector.Name))
		}
		if idx != len(fieldKeySelectors)-1 {
			whereClause += " OR "
		}
		limit += fieldKeySelector.Limit
	}
	args = append(args, limit)

	query := fmt.Sprintf(`
		SELECT
			arrayJoin(tagKeys) AS distinctTagKey
		FROM (
			SELECT JSONExtractKeys(labels) AS tagKeys
			FROM %s.%s
			WHERE `+innerWhereClause+`
			GROUP BY tagKeys
		)
		WHERE `+whereClause+`
		GROUP BY distinctTagKey
		LIMIT ?
	`, t.metricsDBName, t.timeseries1WTblName)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
	}
	defer rows.Close()

	keys := []*telemetrytypes.TelemetryFieldKey{}
	for rows.Next() {
		var name string
		err = rows.Scan(&name)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
		}
		key := &telemetrytypes.TelemetryFieldKey{
			Name:          name,
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
		}
		keys = append(keys, key)
	}

	if rows.Err() != nil {
		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
	}

	return keys, nil
}

func (t *telemetryMetaStore) GetKeys(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
	var keys []*telemetrytypes.TelemetryFieldKey
	var err error
	switch fieldKeySelector.Signal {
	case telemetrytypes.SignalTraces:
		keys, err = t.getTracesKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalLogs:
		keys, err = t.getLogsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalMetrics:
		keys, err = t.getMetricsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalUnspecified:
		// get traces keys
		tracesKeys, err := t.getTracesKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, tracesKeys...)

		// get logs keys
		logsKeys, err := t.getLogsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, logsKeys...)

		// get metrics keys
		metricsKeys, err := t.getMetricsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, metricsKeys...)
	}
	if err != nil {
		return nil, err
	}

	mapOfKeys := make(map[string][]*telemetrytypes.TelemetryFieldKey)
	for _, key := range keys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}

	return mapOfKeys, nil
}

func (t *telemetryMetaStore) GetKeysMulti(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
	logsSelectors := []*telemetrytypes.FieldKeySelector{}
	tracesSelectors := []*telemetrytypes.FieldKeySelector{}
	metricsSelectors := []*telemetrytypes.FieldKeySelector{}

	for _, fieldKeySelector := range fieldKeySelectors {
		switch fieldKeySelector.Signal {
		case telemetrytypes.SignalLogs:
			logsSelectors = append(logsSelectors, fieldKeySelector)
		case telemetrytypes.SignalTraces:
			tracesSelectors = append(tracesSelectors, fieldKeySelector)
		case telemetrytypes.SignalMetrics:
			metricsSelectors = append(metricsSelectors, fieldKeySelector)
		case telemetrytypes.SignalUnspecified:
			logsSelectors = append(logsSelectors, fieldKeySelector)
			tracesSelectors = append(tracesSelectors, fieldKeySelector)
			metricsSelectors = append(metricsSelectors, fieldKeySelector)
		}
	}

	logsKeys, err := t.getLogsKeys(ctx, logsSelectors)
	if err != nil {
		return nil, err
	}
	tracesKeys, err := t.getTracesKeys(ctx, tracesSelectors)
	if err != nil {
		return nil, err
	}
	metricsKeys, err := t.getMetricsKeys(ctx, metricsSelectors)
	if err != nil {
		return nil, err
	}

	mapOfKeys := make(map[string][]*telemetrytypes.TelemetryFieldKey)
	for _, key := range logsKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}
	for _, key := range tracesKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}
	for _, key := range metricsKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}

	return mapOfKeys, nil
}

func (t *telemetryMetaStore) GetKey(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	keys, err := t.GetKeys(ctx, fieldKeySelector)
	if err != nil {
		return nil, err
	}
	return keys[fieldKeySelector.Name], nil
}
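A usage sketch for the key lookup (illustrative only; the key name and limit are hypothetical, and metaStore is assumed to be the store constructed above):

package telemetrymetadata

import (
	"context"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// exampleGetKey looks up the field keys recorded for a hypothetical log attribute.
func exampleGetKey(ctx context.Context, metaStore telemetrytypes.MetadataStore) ([]*telemetrytypes.TelemetryFieldKey, error) {
	selector := &telemetrytypes.FieldKeySelector{
		Name:              "user.id", // hypothetical key name
		Signal:            telemetrytypes.SignalLogs,
		FieldContext:      telemetrytypes.FieldContextAttribute,
		FieldDataType:     telemetrytypes.FieldDataTypeString,
		SelectorMatchType: telemetrytypes.FieldSelectorMatchTypeExact,
		Limit:             10,
	}
	return metaStore.GetKey(ctx, selector)
}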
|
||||
func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, error) {

	args := []any{}

	var andConditions []string

	andConditions = append(andConditions, `unix_milli >= ?`)
	args = append(args, fieldValueSelector.StartUnixMilli)

	andConditions = append(andConditions, `unix_milli <= ?`)
	args = append(args, fieldValueSelector.EndUnixMilli)

	if len(fieldValueSelector.ExistingQuery) != 0 {
		// TODO(srikanthccv): add the existing query to the where clause
	}
	whereClause := strings.Join(andConditions, " AND ")

	key := telemetrytypes.TelemetryFieldKey{
		Name:          fieldValueSelector.Name,
		Signal:        fieldValueSelector.Signal,
		FieldContext:  fieldValueSelector.FieldContext,
		FieldDataType: fieldValueSelector.FieldDataType,
	}

	// TODO(srikanthccv): add the select column
	selectColumn, _ := t.conditionBuilder.GetTableFieldName(ctx, &key)

	args = append(args, fieldValueSelector.Limit)
	filterSubQuery := fmt.Sprintf(
		"SELECT DISTINCT %s FROM %s.%s WHERE %s LIMIT ?",
		selectColumn,
		t.relatedMetadataDBName,
		t.relatedMetadataTblName,
		whereClause,
	)
	zap.L().Debug("filterSubQuery for related values", zap.String("query", filterSubQuery), zap.Any("args", args))

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, filterSubQuery, args...)
	if err != nil {
		return nil, ErrFailedToGetRelatedValues
	}
	defer rows.Close()

	var attributeValues []string
	for rows.Next() {
		var value string
		if err := rows.Scan(&value); err != nil {
			return nil, ErrFailedToGetRelatedValues
		}
		if value != "" {
			attributeValues = append(attributeValues, value)
		}
	}

	return attributeValues, nil
}

func (t *telemetryMetaStore) GetRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, error) {
	return t.getRelatedValues(ctx, fieldValueSelector)
}

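A sketch of calling GetRelatedValues; the local interface, function name, and selector values are illustrative. Per getRelatedValues above, the emitted statement is roughly SELECT DISTINCT <column> FROM <relatedMetadataDB>.<relatedMetadataTbl> WHERE unix_milli >= ? AND unix_milli <= ? LIMIT ?:

package example

import (
	"context"
	"fmt"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// relatedValuesFetcher captures just the method used below.
type relatedValuesFetcher interface {
	GetRelatedValues(ctx context.Context, sel *telemetrytypes.FieldValueSelector) ([]string, error)
}

func printRelatedValues(ctx context.Context, store relatedValuesFetcher) error {
	values, err := store.GetRelatedValues(ctx, &telemetrytypes.FieldValueSelector{
		Name:           "k8s.namespace.name",
		Signal:         telemetrytypes.SignalLogs,
		FieldContext:   telemetrytypes.FieldContextResource,
		FieldDataType:  telemetrytypes.FieldDataTypeString,
		StartUnixMilli: 1714000000000, // illustrative time range
		EndUnixMilli:   1714003600000,
		Limit:          25,
	})
	if err != nil {
		return err
	}
	fmt.Println("related values:", values)
	return nil
}
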
func (t *telemetryMetaStore) getSpanFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// build the query to get the values from the span attributes that match the field selection criteria
	limit := fieldValueSelector.Limit

	sb := sqlbuilder.Select("DISTINCT string_value, number_value").From(t.tracesDBName + "." + t.tracesFieldsTblName)

	if fieldValueSelector.Name != "" {
		sb.Where(sb.E("tag_key", fieldValueSelector.Name))
	}

	// now look at the field context
	if fieldValueSelector.FieldContext != telemetrytypes.FieldContextUnspecified {
		sb.Where(sb.E("tag_type", fieldValueSelector.FieldContext.TagType()))
	}

	// now look at the field data type
	if fieldValueSelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
		sb.Where(sb.E("tag_data_type", fieldValueSelector.FieldDataType.TagDataType()))
	}

	if fieldValueSelector.Value != "" {
		if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeString {
			sb.Where(sb.Like("string_value", "%"+fieldValueSelector.Value+"%"))
		} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeNumber {
			sb.Where(sb.IsNotNull("number_value"))
			sb.Where(sb.Like("toString(number_value)", "%"+fieldValueSelector.Value+"%"))
		}
	}

	if limit == 0 {
		limit = 50
	}
	sb.Limit(limit)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}
	defer rows.Close()

	values := &telemetrytypes.TelemetryFieldValues{}
	seen := make(map[string]bool)
	for rows.Next() {
		var stringValue string
		var numberValue float64
		if err := rows.Scan(&stringValue, &numberValue); err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
		}
		if _, ok := seen[stringValue]; !ok {
			values.StringValues = append(values.StringValues, stringValue)
			seen[stringValue] = true
		}
		if _, ok := seen[fmt.Sprintf("%f", numberValue)]; !ok && numberValue != 0 {
			values.NumberValues = append(values.NumberValues, numberValue)
			seen[fmt.Sprintf("%f", numberValue)] = true
		}
	}

	return values, nil
}

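To make the builder calls concrete, here is a standalone sketch of the statement getSpanFieldValues assembles for a string-typed lookup, assuming the huandu/go-sqlbuilder package imported by this file; the table name and filter values are stand-ins, not the real tracesFieldsTblName:

package example

import (
	"fmt"

	"github.com/huandu/go-sqlbuilder"
)

func demoSpanValuesQuery() {
	sb := sqlbuilder.Select("DISTINCT string_value, number_value").
		From("signoz_traces.tag_attributes_v2") // stand-in table name
	sb.Where(sb.E("tag_key", "http.method"))
	sb.Where(sb.E("tag_type", "tag"))
	sb.Where(sb.Like("string_value", "%GET%"))
	sb.Limit(50)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
	// query is roughly:
	//   SELECT DISTINCT string_value, number_value
	//   FROM signoz_traces.tag_attributes_v2
	//   WHERE tag_key = ? AND tag_type = ? AND string_value LIKE ?
	//   LIMIT 50
	fmt.Println(query, args)
}
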
func (t *telemetryMetaStore) getLogFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// build the query to get the values from the log attributes that match the field selection criteria
	limit := fieldValueSelector.Limit

	sb := sqlbuilder.Select("DISTINCT string_value, number_value").From(t.logsDBName + "." + t.logsFieldsTblName)

	if fieldValueSelector.Name != "" {
		sb.Where(sb.E("tag_key", fieldValueSelector.Name))
	}

	if fieldValueSelector.FieldContext != telemetrytypes.FieldContextUnspecified {
		sb.Where(sb.E("tag_type", fieldValueSelector.FieldContext.TagType()))
	}

	if fieldValueSelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
		sb.Where(sb.E("tag_data_type", fieldValueSelector.FieldDataType.TagDataType()))
	}

	if fieldValueSelector.Value != "" {
		if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeString {
			sb.Where(sb.Like("string_value", "%"+fieldValueSelector.Value+"%"))
		} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeNumber {
			sb.Where(sb.IsNotNull("number_value"))
			sb.Where(sb.Like("toString(number_value)", "%"+fieldValueSelector.Value+"%"))
		}
	}

	if limit == 0 {
		limit = 50
	}
	sb.Limit(limit)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}
	defer rows.Close()

	values := &telemetrytypes.TelemetryFieldValues{}
	seen := make(map[string]bool)
	for rows.Next() {
		var stringValue string
		var numberValue float64
		if err := rows.Scan(&stringValue, &numberValue); err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
		}
		if _, ok := seen[stringValue]; !ok {
			values.StringValues = append(values.StringValues, stringValue)
			seen[stringValue] = true
		}
		if _, ok := seen[fmt.Sprintf("%f", numberValue)]; !ok && numberValue != 0 {
			values.NumberValues = append(values.NumberValues, numberValue)
			seen[fmt.Sprintf("%f", numberValue)] = true
		}
	}
	return values, nil
}

func (t *telemetryMetaStore) getMetricFieldValues(_ context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// TODO(srikanthccv): implement this. use new tables?
	return nil, nil
}

func (t *telemetryMetaStore) GetAllValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	var values *telemetrytypes.TelemetryFieldValues
	var err error
	switch fieldValueSelector.Signal {
	case telemetrytypes.SignalTraces:
		values, err = t.getSpanFieldValues(ctx, fieldValueSelector)
	case telemetrytypes.SignalLogs:
		values, err = t.getLogFieldValues(ctx, fieldValueSelector)
	case telemetrytypes.SignalMetrics:
		values, err = t.getMetricFieldValues(ctx, fieldValueSelector)
	}
	if err != nil {
		return nil, err
	}
	return values, nil
}
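A caller-side sketch of the dispatch above; the local interface, function name, and selector values are illustrative:

package example

import (
	"context"
	"fmt"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// allValuesFetcher captures just the method used below.
type allValuesFetcher interface {
	GetAllValues(ctx context.Context, sel *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error)
}

func printLogFieldValues(ctx context.Context, store allValuesFetcher) error {
	vals, err := store.GetAllValues(ctx, &telemetrytypes.FieldValueSelector{
		Signal:        telemetrytypes.SignalLogs, // routes to getLogFieldValues
		Name:          "log.iostream",
		FieldContext:  telemetrytypes.FieldContextAttribute,
		FieldDataType: telemetrytypes.FieldDataTypeString,
		Value:         "std", // substring match against string_value
	})
	if err != nil {
		return err
	}
	fmt.Println(vals.StringValues, vals.NumberValues)
	return nil
}
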
86
pkg/telemetrymetadata/metadata_test.go
Normal file
@@ -0,0 +1,86 @@
package telemetrymetadata

import (
	"context"
	"fmt"
	"regexp"
	"testing"

	"github.com/SigNoz/signoz/pkg/telemetrylogs"
	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
	"github.com/SigNoz/signoz/pkg/telemetrytraces"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	cmock "github.com/srikanthccv/ClickHouse-go-mock"
)

type regexMatcher struct {
}

func (m *regexMatcher) Match(expectedSQL, actualSQL string) error {
	re, err := regexp.Compile(expectedSQL)
	if err != nil {
		return err
	}
	if !re.MatchString(actualSQL) {
		return fmt.Errorf("expected query to contain %s, got %s", expectedSQL, actualSQL)
	}
	return nil
}

func TestGetKeys(t *testing.T) {
	mockTelemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &regexMatcher{})
	mock := mockTelemetryStore.Mock()

	metadata, err := NewTelemetryMetaStore(
		mockTelemetryStore,
		telemetrytraces.DBName,
		telemetrytraces.TagAttributesV2TableName,
		telemetrytraces.SpanIndexV3TableName,
		telemetrymetrics.DBName,
		telemetrymetrics.TimeseriesV41weekTableName,
		telemetrymetrics.TimeseriesV41weekTableName,
		telemetrylogs.DBName,
		telemetrylogs.LogsV2TableName,
		telemetrylogs.TagAttributesV2TableName,
		DBName,
		AttributesMetadataLocalTableName,
	)
	if err != nil {
		t.Fatalf("Failed to create telemetry metadata store: %v", err)
	}

	rows := cmock.NewRows([]cmock.ColumnType{
		{Name: "statement", Type: "String"},
	}, [][]any{{"CREATE TABLE signoz_traces.signoz_index_v3"}})

	mock.
		ExpectSelect("SHOW CREATE TABLE signoz_traces.distributed_signoz_index_v3").
		WillReturnRows(rows)

	query := `SELECT.*`

	mock.ExpectQuery(query).
		WithArgs("%http.method%", telemetrytypes.FieldContextSpan.TagType(), telemetrytypes.FieldDataTypeString.TagDataType(), 10).
		WillReturnRows(cmock.NewRows([]cmock.ColumnType{
			{Name: "tag_key", Type: "String"},
			{Name: "tag_type", Type: "String"},
			{Name: "tag_data_type", Type: "String"},
			{Name: "priority", Type: "UInt8"},
		}, [][]any{{"http.method", "tag", "String", 1}, {"http.method", "tag", "String", 1}}))
	keys, err := metadata.GetKeys(context.Background(), &telemetrytypes.FieldKeySelector{
		Signal:        telemetrytypes.SignalTraces,
		FieldContext:  telemetrytypes.FieldContextSpan,
		FieldDataType: telemetrytypes.FieldDataTypeString,
		Name:          "http.method",
		Limit:         10,
	})
	if err != nil {
		t.Fatalf("Failed to get keys: %v", err)
	}

	t.Logf("Keys: %v", keys)
}
132
pkg/telemetrymetadata/stmt_parse.go
Normal file
@@ -0,0 +1,132 @@
package telemetrymetadata

import (
	"strings"

	"github.com/AfterShip/clickhouse-sql-parser/parser"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// TelemetryFieldVisitor is an AST visitor for extracting telemetry fields
type TelemetryFieldVisitor struct {
	parser.DefaultASTVisitor
	Fields []*telemetrytypes.TelemetryFieldKey
}

func NewTelemetryFieldVisitor() *TelemetryFieldVisitor {
	return &TelemetryFieldVisitor{
		Fields: make([]*telemetrytypes.TelemetryFieldKey, 0),
	}
}

// VisitColumnDef is called when visiting a column definition
func (v *TelemetryFieldVisitor) VisitColumnDef(expr *parser.ColumnDef) error {
	// Check if this is a materialized column with DEFAULT expression
	if expr.DefaultExpr == nil {
		return nil
	}

	// Parse column name to extract context and data type
	columnName := expr.Name.String()

	// Remove backticks if present
	columnName = strings.TrimPrefix(columnName, "`")
	columnName = strings.TrimSuffix(columnName, "`")

	// Parse the column name to extract components
	parts := strings.Split(columnName, "_")
	if len(parts) < 2 {
		return nil
	}

	context := parts[0]
	dataType := parts[1]

	// Check if this is a valid telemetry column
	var fieldContext telemetrytypes.FieldContext
	switch context {
	case "resource":
		fieldContext = telemetrytypes.FieldContextResource
	case "scope":
		fieldContext = telemetrytypes.FieldContextScope
	case "attribute":
		fieldContext = telemetrytypes.FieldContextAttribute
	default:
		return nil // Not a telemetry column
	}

	// Check and convert data type
	var fieldDataType telemetrytypes.FieldDataType
	switch dataType {
	case "string":
		fieldDataType = telemetrytypes.FieldDataTypeString
	case "bool":
		fieldDataType = telemetrytypes.FieldDataTypeBool
	case "int", "int64", "float", "float64", "number":
		fieldDataType = telemetrytypes.FieldDataTypeFloat64
	default:
		return nil // Unknown data type
	}

	// Extract field name from the DEFAULT expression
	// The DEFAULT expression should be something like: resources_string['k8s.cluster.name']
	// We need to extract the key inside the square brackets
	defaultExprStr := expr.DefaultExpr.String()

	// Look for the pattern: map['key']
	startIdx := strings.Index(defaultExprStr, "['")
	endIdx := strings.Index(defaultExprStr, "']")

	if startIdx == -1 || endIdx == -1 || startIdx+2 >= endIdx {
		return nil // Invalid DEFAULT expression format
	}

	fieldName := defaultExprStr[startIdx+2 : endIdx]

	// Create and store the TelemetryFieldKey
	field := telemetrytypes.TelemetryFieldKey{
		Name:          fieldName,
		FieldContext:  fieldContext,
		FieldDataType: fieldDataType,
		Materialized:  true,
	}

	v.Fields = append(v.Fields, &field)
	return nil
}

func ExtractFieldKeysFromTblStatement(statement string) ([]*telemetrytypes.TelemetryFieldKey, error) {
	// Parse the CREATE TABLE statement using the ClickHouse parser
	p := parser.NewParser(statement)
	stmts, err := p.ParseStmts()
	if err != nil {
		return nil, err
	}

	// Create a visitor to collect telemetry fields
	visitor := NewTelemetryFieldVisitor()

	// Visit each statement
	for _, stmt := range stmts {
		// We're looking for CreateTable statements
		createTable, ok := stmt.(*parser.CreateTable)
		if !ok {
			continue
		}

		// Visit the table schema to extract column definitions
		if createTable.TableSchema != nil {
			for _, column := range createTable.TableSchema.Columns {
				if err := column.Accept(visitor); err != nil {
					return nil, err
				}
			}
		}
	}

	return visitor.Fields, nil
}
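A small, self-contained sketch of the extraction path: one materialized column whose name follows the <context>_<dataType>_<name> pattern and whose DEFAULT is a map lookup. The table definition, function name, and the assumed import path github.com/SigNoz/signoz/pkg/telemetrymetadata are illustrative:

package example

import (
	"fmt"
	"log"

	"github.com/SigNoz/signoz/pkg/telemetrymetadata"
)

func demoExtractFieldKeys() {
	stmt := "CREATE TABLE t (" +
		"`resource_string_k8s$$pod$$name` String DEFAULT resources_string['k8s.pod.name'] CODEC(ZSTD(1))" +
		") ENGINE = MergeTree ORDER BY tuple()"

	keys, err := telemetrymetadata.ExtractFieldKeysFromTblStatement(stmt)
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		// Expected: k8s.pod.name, resource context, string type, materialized
		fmt.Println(k.Name, k.FieldContext, k.FieldDataType, k.Materialized)
	}
}
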
148
pkg/telemetrymetadata/stmt_parse_test.go
Normal file
@@ -0,0 +1,148 @@
package telemetrymetadata

import (
	"slices"
	"testing"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

func TestExtractFieldKeysFromTblStatement(t *testing.T) {

	var statement = `CREATE TABLE signoz_logs.logs_v2
(
	` + "`ts_bucket_start`" + ` UInt64 CODEC(DoubleDelta, LZ4),
	` + "`resource_fingerprint`" + ` String CODEC(ZSTD(1)),
	` + "`timestamp`" + ` UInt64 CODEC(DoubleDelta, LZ4),
	` + "`observed_timestamp`" + ` UInt64 CODEC(DoubleDelta, LZ4),
	` + "`id`" + ` String CODEC(ZSTD(1)),
	` + "`trace_id`" + ` String CODEC(ZSTD(1)),
	` + "`span_id`" + ` String CODEC(ZSTD(1)),
	` + "`trace_flags`" + ` UInt32,
	` + "`severity_text`" + ` LowCardinality(String) CODEC(ZSTD(1)),
	` + "`severity_number`" + ` UInt8,
	` + "`body`" + ` String CODEC(ZSTD(2)),
	` + "`attributes_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
	` + "`attributes_number`" + ` Map(LowCardinality(String), Float64) CODEC(ZSTD(1)),
	` + "`attributes_bool`" + ` Map(LowCardinality(String), Bool) CODEC(ZSTD(1)),
	` + "`resources_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
	` + "`scope_name`" + ` String CODEC(ZSTD(1)),
	` + "`scope_version`" + ` String CODEC(ZSTD(1)),
	` + "`scope_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
	` + "`attribute_number_input_size`" + ` Int64 DEFAULT attributes_number['input_size'] CODEC(ZSTD(1)),
	` + "`attribute_number_input_size_exists`" + ` Bool DEFAULT if(mapContains(attributes_number, 'input_size') != 0, true, false) CODEC(ZSTD(1)),
	` + "`attribute_string_log$$iostream`" + ` String DEFAULT attributes_string['log.iostream'] CODEC(ZSTD(1)),
	` + "`attribute_string_log$$iostream_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'log.iostream') != 0, true, false) CODEC(ZSTD(1)),
	` + "`attribute_string_log$$file$$path`" + ` String DEFAULT attributes_string['log.file.path'] CODEC(ZSTD(1)),
	` + "`attribute_string_log$$file$$path_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'log.file.path') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$cluster$$name`" + ` String DEFAULT resources_string['k8s.cluster.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$cluster$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.cluster.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$namespace$$name`" + ` String DEFAULT resources_string['k8s.namespace.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$namespace$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.namespace.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$pod$$name`" + ` String DEFAULT resources_string['k8s.pod.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$pod$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.pod.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$node$$name`" + ` String DEFAULT resources_string['k8s.node.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$node$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.node.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$container$$name`" + ` String DEFAULT resources_string['k8s.container.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$container$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.container.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$deployment$$name`" + ` String DEFAULT resources_string['k8s.deployment.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$deployment$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.deployment.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`attribute_string_processor`" + ` String DEFAULT attributes_string['processor'] CODEC(ZSTD(1)),
	` + "`attribute_string_processor_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'processor') != 0, true, false) CODEC(ZSTD(1)),
	INDEX body_idx lower(body) TYPE ngrambf_v1(4, 60000, 5, 0) GRANULARITY 1,
	INDEX id_minmax id TYPE minmax GRANULARITY 1,
	INDEX severity_number_idx severity_number TYPE set(25) GRANULARITY 4,
	INDEX severity_text_idx severity_text TYPE set(25) GRANULARITY 4,
	INDEX trace_flags_idx trace_flags TYPE bloom_filter GRANULARITY 4,
	INDEX scope_name_idx scope_name TYPE tokenbf_v1(10240, 3, 0) GRANULARITY 4,
	INDEX ` + "`resource_string_k8s$$cluster$$name_idx`" + ` ` + "`resource_string_k8s$$cluster$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$namespace$$name_idx`" + ` ` + "`resource_string_k8s$$namespace$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$pod$$name_idx`" + ` ` + "`resource_string_k8s$$pod$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$node$$name_idx`" + ` ` + "`resource_string_k8s$$node$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$container$$name_idx`" + ` ` + "`resource_string_k8s$$container$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$deployment$$name_idx`" + ` ` + "`resource_string_k8s$$deployment$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX attribute_string_processor_idx attribute_string_processor TYPE bloom_filter(0.01) GRANULARITY 64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')
PARTITION BY toDate(timestamp / 1000000000)
ORDER BY (ts_bucket_start, resource_fingerprint, severity_text, timestamp, id)
TTL toDateTime(timestamp / 1000000000) + toIntervalSecond(2592000)
SETTINGS ttl_only_drop_parts = 1, index_granularity = 8192`

	keys, err := ExtractFieldKeysFromTblStatement(statement)
	if err != nil {
		t.Fatalf("failed to extract field keys from tbl statement: %v", err)
	}

	// some expected keys
	expectedKeys := []*telemetrytypes.TelemetryFieldKey{
		{
			Name:          "k8s.pod.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.cluster.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.namespace.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.deployment.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.node.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.container.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "processor",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "input_size",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			Materialized:  true,
		},
		{
			Name:          "log.iostream",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "log.file.path",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
	}

	for _, key := range expectedKeys {
		if !slices.ContainsFunc(keys, func(k *telemetrytypes.TelemetryFieldKey) bool {
			return k.Name == key.Name && k.FieldContext == key.FieldContext && k.FieldDataType == key.FieldDataType && k.Materialized == key.Materialized
		}) {
			t.Errorf("expected key %v not found", key)
		}
	}
}