mirror of https://github.com/SigNoz/signoz.git
synced 2026-02-06 09:42:18 +00:00

Compare commits: licensing...issue_7376 (21 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | e1075e6b84 |  |
|  | 93b7f40a24 |  |
|  | ba6f31b1c3 |  |
|  | eed92978a4 |  |
|  | 41cbd316b5 |  |
|  | 8d7d33393d |  |
|  | 8d143b44b1 |  |
|  | 423aebd6eb |  |
|  | 8d630707af |  |
|  | a5b52431b7 |  |
|  | 0138d757c8 |  |
|  | 844195b84f |  |
|  | 8ff05b2e8f |  |
|  | c8c56c544e |  |
|  | 1c43655336 |  |
|  | c269c8c6b8 |  |
|  | 3142b6cc6d |  |
|  | 58e141685a |  |
|  | e17f63a50c |  |
|  | 838ef5dcc5 |  |
|  | e53d3d1269 |  |
.github/workflows/build-community.yaml (vendored, new file, 81 lines)

```yaml
name: build-community

on:
  push:
    tags:
      - 'v[0-9]+.[0-9]+.[0-9]+'
      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_OUTPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_NAME: signoz-community
      GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./pkg/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=community
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: dockerhub
```
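The `-X` flags in `GO_BUILD_FLAGS` stamp the `prepare` job's outputs into package-level variables at link time, so the binary can report its own version without any generated source. A minimal sketch of the mechanism, using hypothetical `main`-package variables (the real ones live in `github.com/SigNoz/signoz/pkg/version`):

```go
package main

import "fmt"

// Plain package-level variables; the defaults apply to a local `go build`,
// and CI overwrites them at link time via -ldflags "-X ...".
var (
	version = "dev"
	hash    = "unknown"
)

func main() {
	// Built with, for example:
	//   go build -ldflags "-X main.version=v0.78.1 -X main.hash=e1075e6" .
	fmt.Printf("version=%s hash=%s\n", version, hash)
}
```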
.github/workflows/build-enterprise.yaml (vendored, new file, 113 lines)

```yaml
name: build-enterprise

on:
  push:
    tags:
      - v*

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    outputs:
      docker_providers: ${{ steps.set-docker-providers.outputs.providers }}
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
      - name: set-docker-providers
        id: set-docker-providers
        run: |
          if [[ ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ || ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
            echo "providers=dockerhub gcp" >> $GITHUB_OUTPUT
          else
            echo "providers=gcp" >> $GITHUB_OUTPUT
          fi
      - name: create-dotenv
        run: |
          mkdir -p frontend
          echo 'CI=1' > frontend/.env
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
      - name: cache-dotenv
        uses: actions/cache@v4
        with:
          path: frontend/.env
          key: dotenv-${{ github.sha }}
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_INPUT_ARTIFACT_CACHE_KEY: dotenv-${{ github.sha }}
      JS_INPUT_ARTIFACT_PATH: frontend/.env
      JS_OUTPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_INPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./ee/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}
```
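The `set-docker-providers` step gates Docker Hub publishing on the tag shape: stable `vX.Y.Z` and `vX.Y.Z-rc.N` tags go to both providers, everything else stays on GCP. The workflow does this in bash; a sketch of the same decision in Go, for clarity:

```go
package main

import (
	"fmt"
	"regexp"
)

// The same patterns the set-docker-providers step matches against
// github.event.ref: stable releases and rc pre-releases.
var (
	release = regexp.MustCompile(`^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$`)
	rc      = regexp.MustCompile(`^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$`)
)

func providers(ref string) string {
	if release.MatchString(ref) || rc.MatchString(ref) {
		return "dockerhub gcp" // public release: push everywhere
	}
	return "gcp" // anything else stays internal
}

func main() {
	fmt.Println(providers("refs/tags/v0.78.1"))      // dockerhub gcp
	fmt.Println(providers("refs/tags/v0.78.1-rc.1")) // dockerhub gcp
	fmt.Println(providers("refs/tags/v0.78.1-beta")) // gcp
}
```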
.github/workflows/build-staging.yaml (vendored, new file, 131 lines)

```yaml
name: build-staging

on:
  push:
    branches:
      - main
  pull_request:
    types: [labeled]

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    outputs:
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
      deployment: ${{ steps.build-info.outputs.deployment }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT

          staging_label="${{ github.event.label.name }}"
          if [[ "${staging_label}" == "staging:"* ]]; then
            deployment=${staging_label#"staging:"}
          elif [[ "${{ github.event.ref }}" == "refs/heads/main" ]]; then
            deployment="staging"
          else
            echo "error: not able to determine deployment - please verify the PR label or the branch"
            exit 1
          fi
          echo "deployment=${deployment}" >> $GITHUB_OUTPUT
      - name: create-dotenv
        run: |
          mkdir -p frontend
          echo 'CI=1' > frontend/.env
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
      - name: cache-dotenv
        uses: actions/cache@v4
        with:
          path: frontend/.env
          key: dotenv-${{ github.sha }}
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_INPUT_ARTIFACT_CACHE_KEY: dotenv-${{ github.sha }}
      JS_INPUT_ARTIFACT_PATH: frontend/.env
      JS_OUTPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_INPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./ee/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: gcp
  staging:
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
    secrets: inherit
    needs: [prepare, go-build]
    with:
      PRIMUS_REF: main
      GITHUB_ENVIRONMENT: staging
      GITHUB_SILENT: true
      GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
      GITHUB_EVENT_NAME: releaser
      GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"
```
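The `build-info` step derives the deployment name from a PR label: `${staging_label#"staging:"}` is bash prefix removal, so a `staging:eu-1` label selects the `eu-1` deployment, while pushes to `main` fall back to `staging`. Equivalent logic sketched in Go:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// Mirrors the shell logic above: a "staging:<name>" PR label selects the
// deployment; pushes to main fall back to "staging"; anything else fails.
func deployment(label, ref string) (string, error) {
	if strings.HasPrefix(label, "staging:") {
		return strings.TrimPrefix(label, "staging:"), nil // ${label#"staging:"}
	}
	if ref == "refs/heads/main" {
		return "staging", nil
	}
	return "", errors.New("not able to determine deployment")
}

func main() {
	d, _ := deployment("staging:eu-1", "")
	fmt.Println(d) // eu-1
}
```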
.github/workflows/build.yaml (vendored, deleted file, 122 lines)

```yaml
name: build

on:
  push:
    branches:
      - main
    tags:
      - v*

jobs:
  enterprise:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: setup
        uses: actions/setup-go@v5
        with:
          go-version: "1.22"
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: latest
      - name: docker-login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: create-env-file
        run: |
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
      - name: github-ref-info
        shell: bash
        run: |
          GH_REF=${{ github.ref }}
          if [[ "${{ github.ref_type }}" == "tag" ]]; then
            PREFIX="refs/tags/"
            echo "GH_IS_TAG=true" >> $GITHUB_ENV
            echo "GH_TAG=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          else
            PREFIX="refs/heads/"
            echo "GH_IS_TAG=false" >> $GITHUB_ENV
            echo "GH_BRANCH_NAME=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          fi
      - name: set-version
        run: |
          if [ '${{ env.GH_IS_TAG }}' == 'true' ]; then
            echo "VERSION=${{ env.GH_TAG }}" >> $GITHUB_ENV
          elif [ '${{ env.GH_BRANCH_NAME }}' == 'main' ]; then
            echo "VERSION=latest" >> $GITHUB_ENV
          else
            echo "VERSION=${{ env.GH_BRANCH_NAME }}" >> $GITHUB_ENV
          fi
      - name: cross-compilation-tools
        run: |
          set -ex
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: publish
        run: make docker-buildx-enterprise

  community:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: "1.22"
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: latest
      - name: docker-login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: github-ref-info
        shell: bash
        run: |
          GH_REF=${{ github.ref }}
          if [[ "${{ github.ref_type }}" == "tag" ]]; then
            PREFIX="refs/tags/"
            echo "GH_IS_TAG=true" >> $GITHUB_ENV
            echo "GH_TAG=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          else
            PREFIX="refs/heads/"
            echo "GH_IS_TAG=false" >> $GITHUB_ENV
            echo "GH_BRANCH_NAME=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          fi
      - name: set-version
        run: |
          if [ '${{ env.GH_IS_TAG }}' == 'true' ]; then
            echo "VERSION=${{ env.GH_TAG }}" >> $GITHUB_ENV
          elif [ '${{ env.GH_BRANCH_NAME }}' == 'main' ]; then
            echo "VERSION=latest" >> $GITHUB_ENV
          else
            echo "VERSION=${{ env.GH_BRANCH_NAME }}" >> $GITHUB_ENV
          fi
      - name: cross-compilation-tools
        run: |
          set -ex
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: publish
        run: make docker-buildx-community
```
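The deleted `set-version` step resolved the Docker tag from the git ref: tags become the version verbatim, `main` maps to `latest`, and any other branch uses its own name. The same decision, sketched in Go for clarity:

```go
package main

import "fmt"

// Mirrors the deleted workflow's set-version step.
func dockerVersion(isTag bool, tag, branch string) string {
	switch {
	case isTag:
		return tag
	case branch == "main":
		return "latest"
	default:
		return branch
	}
}

func main() {
	fmt.Println(dockerVersion(true, "v0.78.1", "")) // v0.78.1
	fmt.Println(dockerVersion(false, "", "main"))   // latest
	fmt.Println(dockerVersion(false, "", "feat-x")) // feat-x
}
```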
.github/workflows/staging-deployment.yaml (vendored, 13 lines changed)

```diff
@@ -36,12 +36,17 @@ jobs:
           echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
           echo "GITHUB_SHA: ${GITHUB_SHA}"
           export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
-          export OTELCOL_TAG="main"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
           export KAFKA_SPAN_EVAL="true"
-          docker system prune --force
-          docker pull signoz/signoz-otel-collector:main
-          docker pull signoz/signoz-schema-migrator:main
+          docker system prune --force --all
+          OTELCOL_TAG=$(curl -s https://api.github.com/repos/SigNoz/signoz-otel-collector/releases/latest | jq -r '.tag_name // "not-found"')
+          if [[ "${OTELCOL_TAG}" == "not-found" ]]; then
+            echo "warning: unable to determine latest SigNoz OtelCollector release tag, skipping latest otelcol deployment"
+          else
+            export OTELCOL_TAG=${OTELCOL_TAG}
+            docker pull signoz/signoz-otel-collector:${OTELCOL_TAG}
+            docker pull signoz/signoz-schema-migrator:${OTELCOL_TAG}
+          fi
           cd ~/signoz
           git status
           git add .
```
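The new `curl | jq` line asks the GitHub releases API for the collector's latest tag; jq's `//` operator supplies the `not-found` default when the field is missing. A rough Go equivalent of that lookup (hypothetical `latestOtelcolTag` helper name):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Fetch the latest release tag from the GitHub API, falling back to
// "not-found" on any error — same contract as the jq `// "not-found"`.
func latestOtelcolTag() string {
	resp, err := http.Get("https://api.github.com/repos/SigNoz/signoz-otel-collector/releases/latest")
	if err != nil {
		return "not-found"
	}
	defer resp.Body.Close()

	var release struct {
		TagName string `json:"tag_name"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&release); err != nil || release.TagName == "" {
		return "not-found"
	}
	return release.TagName
}

func main() {
	fmt.Println(latestOtelcolTag())
}
```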
.github/workflows/testing-deployment.yaml (vendored, 2 lines changed)

```diff
@@ -38,7 +38,7 @@ jobs:
           export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
           export DEV_BUILD="1"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-          docker system prune --force
+          docker system prune --force --all
           cd ~/signoz
           git status
           git add .
```
.gitignore (vendored, 1 line changed)

```diff
@@ -83,4 +83,3 @@ queries.active

 # .devenv tmp files
 .devenv/**/tmp/**
-.qodo
```
.versions/alpine (new file, 17 lines)

```
#### Auto generated by make docker-version-alpine. DO NOT EDIT! ####
amd64=029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85
unknown=5fea95373b9ec85974843f31446fa6a9df4492dddae4e1cb056193c34a20a5be
arm=b4aef1a899e0271f06d948c9a8fa626ecdb2202d3a178bc14775dd559e23df8e
unknown=a4d1e27e63a9d6353046eb25a2f0ec02945012b217f4364cd83a73fe6dfb0b15
arm=4fdafe217d0922f3c3e2b4f64cf043f8403a4636685cd9c51fea2cbd1f419740
unknown=7f21ac2018d95b2c51a5779c1d5ca6c327504adc3b0fdc747a6725d30b3f13c2
arm64=ea3c5a9671f7b3f7eb47eab06f73bc6591df978b0d5955689a9e6f943aa368c0
unknown=a8ba68c1a9e6eea8041b4b8f996c235163440808b9654a865976fdcbede0f433
386=dea9f02e103e837849f984d5679305c758aba7fea1b95b7766218597f61a05ab
unknown=3c6629bec05c8273a927d46b77428bf4a378dad911a0ae284887becdc149b734
ppc64le=0880443bffa028dfbbc4094a32dd6b7ac25684e4c0a3d50da9e0acae355c5eaf
unknown=bb48308f976b266e3ab39bbf9af84521959bd9c295d3c763690cf41f8df2a626
riscv64=d76e6fbe348ff20c2931bb7f101e49379648e026de95dd37f96e00ce1909dcf7
unknown=dd807544365f6dc187cbe6de0806adce2ea9de3e7124717d1d8e8b7a18b77b64
s390x=b815fadf80495594eb6296a6af0bc647ae5f193e0044e07acec7e5b378c9ce2d
unknown=74681be74a280a88abb53ff1e048eb1fb624b30d0066730df6d8afd02ba82e01
```
```diff
@@ -174,7 +174,7 @@ services:
     #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.77.0
+    image: signoz/signoz:v0.78.1
     command:
       - --config=/root/config/prometheus.yml
       - --use-logs-new-schema=true
@@ -208,7 +208,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.111.37
+    image: signoz/signoz-otel-collector:v0.111.38
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -232,7 +232,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.111.37
+    image: signoz/signoz-schema-migrator:v0.111.38
    deploy:
      restart_policy:
        condition: on-failure
@@ -110,7 +110,7 @@ services:
     #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.77.0
+    image: signoz/signoz:v0.78.1
     command:
       - --config=/root/config/prometheus.yml
       - --use-logs-new-schema=true
@@ -143,7 +143,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.111.37
+    image: signoz/signoz-otel-collector:v0.111.38
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -167,7 +167,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.111.37
+    image: signoz/signoz-schema-migrator:v0.111.38
    deploy:
      restart_policy:
        condition: on-failure
@@ -177,7 +177,7 @@ services:
     #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.77.0}
+    image: signoz/signoz:${VERSION:-v0.78.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -212,7 +212,7 @@ services:
   # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -238,7 +238,7 @@ services:
         condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -249,7 +249,7 @@ services:
         condition: service_healthy
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-async
     command:
       - async
@@ -110,7 +110,7 @@ services:
     #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.77.0}
+    image: signoz/signoz:${VERSION:-v0.78.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -146,7 +146,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -168,7 +168,7 @@ services:
         condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -180,7 +180,7 @@ services:
     restart: on-failure
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-async
     command:
       - async
@@ -110,7 +110,7 @@ services:
     #   - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.77.0}
+    image: signoz/signoz:${VERSION:-v0.78.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -144,7 +144,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -166,7 +166,7 @@ services:
         condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -178,7 +178,7 @@ services:
     restart: on-failure
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-async
     command:
       - async
```
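Throughout these compose hunks, `${OTELCOL_TAG:-v0.111.38}`-style references mean "use the environment variable when set and non-empty, else the pinned default", which is how the staging deployment above overrides the collector tag while everyone else gets the release pin. The same fallback, sketched in Go:

```go
package main

import (
	"fmt"
	"os"
)

// envOr reproduces shell ${KEY:-fallback}: the fallback applies when the
// variable is unset or empty.
func envOr(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return fallback
}

func main() {
	fmt.Println("signoz/signoz-otel-collector:" + envOr("OTELCOL_TAG", "v0.111.38"))
}
```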
Deleted file (package licensingserver, 31 lines)

```go
package licensingserver

import (
	"time"

	"github.com/SigNoz/signoz/pkg/licensing"
)

type Config struct {
	PollingConfig PollingConfig `mapstructure:"polling"`
}

type PollingConfig struct {
	Interval time.Duration `mapstructure:"interval"`
}

func NewConfig() Config {
	return Config{
		PollingConfig: PollingConfig{
			Interval: 24 * time.Hour,
		},
	}
}

func NewConfigFromLicensingConfig(config licensing.Config) Config {
	return Config{
		PollingConfig: PollingConfig{
			Interval: config.PollingConfig.Interval,
		},
	}
}
```
Deleted file (package licensingserver, 72 lines)

```go
package licensingserver

import (
	"context"
	"log/slog"
	"sync"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/SigNoz/signoz/pkg/zeus"
)

type Server struct {
	logger *slog.Logger

	cfg Config

	orgID valuer.UUID

	zeus zeus.Zeus

	store licensetypes.Store

	license licensetypes.License

	mtx sync.RWMutex
}

func NewServer(logger *slog.Logger, config Config, orgID valuer.UUID, zeus zeus.Zeus, store licensetypes.Store) *Server {
	return &Server{
		logger:  logger,
		cfg:     config,
		orgID:   orgID,
		zeus:    zeus,
		store:   store,
		license: licensetypes.NewNoop(),
	}
}

func (server *Server) Fetch(ctx context.Context) error {
	license, err := server.store.GetLatest(ctx, server.orgID)
	if err != nil {
		if errors.Ast(err, errors.TypeNotFound) {
			return nil
		}

		return err
	}

	fetchedLicense, err := server.zeus.GetLicense(ctx, license.Key())
	if err != nil {
		return err
	}

	return server.SetLicense(ctx, fetchedLicense)
}

func (server *Server) SetLicense(ctx context.Context, license licensetypes.License) error {
	server.mtx.Lock()
	defer server.mtx.Unlock()

	server.license = license
	return nil
}

func (server *Server) GetLicense(ctx context.Context) licensetypes.License {
	server.mtx.RLock()
	defer server.mtx.RUnlock()

	return server.license
}
```
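The deleted `Server` guards its cached license with `sync.RWMutex`: `GetLicense` takes only a read lock, so concurrent readers never block each other, while `Fetch`/`SetLicense` briefly takes the write lock. The pattern in isolation, as a minimal standalone sketch (not SigNoz code):

```go
package main

import (
	"fmt"
	"sync"
)

// An RWMutex lets many readers share a cached value while a writer briefly
// excludes everyone — the same shape as Server above.
type cache struct {
	mtx     sync.RWMutex
	license string
}

func (c *cache) Get() string {
	c.mtx.RLock()
	defer c.mtx.RUnlock()
	return c.license
}

func (c *cache) Set(l string) {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.license = l
}

func main() {
	c := &cache{}
	c.Set("enterprise")
	fmt.Println(c.Get())
}
```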
Deleted file (package sqllicensingstore, 35 lines)

```go
package sqllicensingstore

import (
	"context"

	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type store struct {
	sqlstore sqlstore.SQLStore
}

func NewStore(sqlstore sqlstore.SQLStore) licensetypes.Store {
	return &store{
		sqlstore: sqlstore,
	}
}

func (store *store) Set(ctx context.Context, license licensetypes.License) error {
	return nil
}

func (store *store) Get(ctx context.Context, orgID valuer.UUID) ([]licensetypes.License, error) {
	return nil, nil
}

func (store *store) GetLatest(ctx context.Context, orgID valuer.UUID) (licensetypes.License, error) {
	return nil, nil
}

func (store *store) ListOrgs(ctx context.Context) ([]valuer.UUID, error) {
	return nil, nil
}
```
Deleted file (package pollinglicensing, 109 lines)

```go
package pollinglicensing

import (
	"context"
	"time"

	"github.com/SigNoz/signoz/ee/licensing/licensingstore/sqllicensingstore"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/SigNoz/signoz/pkg/zeus"
)

type provider struct {
	config   licensing.Config
	settings factory.ScopedProviderSettings
	zeus     zeus.Zeus
	service  *Service
	store    licensetypes.Store
	stopC    chan struct{}
}

func NewFactory(zeus zeus.Zeus, sqlstore sqlstore.SQLStore) factory.ProviderFactory[licensing.Licensing, licensing.Config] {
	return factory.NewProviderFactory(factory.MustNewName("sql"), func(ctx context.Context, providerSettings factory.ProviderSettings, config licensing.Config) (licensing.Licensing, error) {
		return New(ctx, providerSettings, config, zeus, sqlstore)
	})
}

func New(ctx context.Context, providerSettings factory.ProviderSettings, config licensing.Config, zeus zeus.Zeus, sqlstore sqlstore.SQLStore) (licensing.Licensing, error) {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/licensing/pollinglicensing")
	store := sqllicensingstore.NewStore(sqlstore)

	return &provider{
		config:   config,
		settings: settings,
		zeus:     zeus,
		service:  NewService(ctx, settings, config, store, zeus),
		stopC:    make(chan struct{}),
	}, nil
}

func (provider *provider) Start(ctx context.Context) error {
	if err := provider.service.SyncServers(ctx); err != nil {
		provider.settings.Logger().ErrorContext(ctx, "failed to sync licensing servers", "error", err)
		return err
	}

	ticker := time.NewTicker(provider.config.PollingConfig.Interval)
	defer ticker.Stop()
	for {
		select {
		case <-provider.stopC:
			return nil
		case <-ticker.C:
			if err := provider.service.SyncServers(ctx); err != nil {
				provider.settings.Logger().ErrorContext(ctx, "failed to sync licensing servers", "error", err)
			}
		}
	}
}

func (provider *provider) GetLatestLicense(ctx context.Context, orgID valuer.UUID) (licensetypes.License, error) {
	server, err := provider.service.getServer(orgID)
	if err != nil {
		return nil, err
	}

	return server.GetLicense(ctx), nil
}

func (provider *provider) GetLicenses(ctx context.Context, orgID valuer.UUID, params licensetypes.GettableLicenseParams) (licensetypes.GettableLicenses, error) {
	if params.Active != nil {
		if *params.Active {
			license, err := provider.GetLatestLicense(ctx, orgID)
			if err != nil {
				return nil, err
			}

			return licensetypes.GettableLicenses{license}, nil
		}
	}

	licenses, err := provider.store.Get(ctx, orgID)
	if err != nil {
		return nil, err
	}

	return licenses, nil
}

func (provider *provider) SetLicense(ctx context.Context, orgID valuer.UUID, key string) error {
	license, err := provider.zeus.GetLicense(ctx, key)
	if err != nil {
		return err
	}

	if err := provider.store.Set(ctx, license); err != nil {
		return err
	}

	return provider.service.SyncOrgServer(ctx, orgID)
}

func (provider *provider) Stop(ctx context.Context) error {
	close(provider.stopC)
	return nil
}
```
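`Start` above blocks in the ticker loop until `Stop` closes `stopC`, so callers are expected to run it in a goroutine. A standalone sketch of that ticker-plus-stop-channel contract:

```go
package main

import (
	"fmt"
	"time"
)

// run polls on a ticker and exits when the stop channel is closed —
// the same lifecycle the provider's Start/Stop implement.
func run(interval time.Duration, stopC <-chan struct{}, sync func()) {
	sync() // initial sync before the first tick
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-stopC:
			return
		case <-ticker.C:
			sync()
		}
	}
}

func main() {
	stopC := make(chan struct{})
	go run(10*time.Millisecond, stopC, func() { fmt.Println("sync") })
	time.Sleep(25 * time.Millisecond)
	close(stopC) // unblocks the loop, like provider.Stop
}
```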
Deleted file (package pollinglicensing, 103 lines)

```go
package pollinglicensing

import (
	"context"
	"sync"

	"github.com/SigNoz/signoz/ee/licensing/licensingserver"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/SigNoz/signoz/pkg/zeus"
)

type Service struct {
	// config is the config for the licensing service
	config licensing.Config

	// store is the store for the licensing service
	store licensetypes.Store

	// zeus is the client used to fetch licenses from the backend
	zeus zeus.Zeus

	// settings is the settings for the licensing service
	settings factory.ScopedProviderSettings

	// Map of organization id to licensing server
	servers map[valuer.UUID]*licensingserver.Server

	// Mutex to protect the servers map
	serversMtx sync.RWMutex
}

func NewService(ctx context.Context, settings factory.ScopedProviderSettings, config licensing.Config, store licensetypes.Store, zeus zeus.Zeus) *Service {
	service := &Service{
		config:     config,
		store:      store,
		zeus:       zeus,
		settings:   settings,
		servers:    make(map[valuer.UUID]*licensingserver.Server),
		serversMtx: sync.RWMutex{},
	}

	return service
}

func (service *Service) SyncServers(ctx context.Context) error {
	orgIDs, err := service.store.ListOrgs(ctx)
	if err != nil {
		return err
	}

	service.serversMtx.Lock()
	for _, orgID := range orgIDs {
		// If the server is not present, create it and sync the config
		if _, ok := service.servers[orgID]; !ok {
			server := licensingserver.NewServer(service.settings.Logger(), licensingserver.NewConfigFromLicensingConfig(service.config), orgID, service.zeus, service.store)
			service.servers[orgID] = server
		}

		err = service.servers[orgID].Fetch(ctx)
		if err != nil {
			service.settings.Logger().Error("failed to fetch license for licensing server", "orgID", orgID, "error", err)
			continue
		}
	}
	service.serversMtx.Unlock()

	return nil
}

func (service *Service) SyncOrgServer(ctx context.Context, orgID valuer.UUID) error {
	service.serversMtx.Lock()
	defer service.serversMtx.Unlock()

	_, ok := service.servers[orgID]
	if !ok {
		server := licensingserver.NewServer(service.settings.Logger(), licensingserver.NewConfigFromLicensingConfig(service.config), orgID, service.zeus, service.store)
		service.servers[orgID] = server
	}

	err := service.servers[orgID].Fetch(ctx)
	if err != nil {
		service.settings.Logger().Error("failed to fetch license for licensing server", "orgID", orgID, "error", err)
		return err
	}

	return nil
}

func (service *Service) getServer(orgID valuer.UUID) (*licensingserver.Server, error) {
	service.serversMtx.RLock()
	defer service.serversMtx.RUnlock()

	server, ok := service.servers[orgID]
	if !ok {
		return nil, errors.Newf(errors.TypeNotFound, licensing.ErrCodeLicensingServerNotFound, "server not found for %s", orgID.StringValue())
	}

	return server, nil
}
```
```diff
@@ -18,4 +18,4 @@ COPY frontend/build/ /etc/signoz/web/
 RUN chmod 755 /root /root/signoz

 ENTRYPOINT ["./signoz"]
-CMD ["-config", "/root/config/prometheus.yml"]
+CMD ["-config", "/root/config/prometheus.yml"]
```
ee/query-service/Dockerfile.multi-arch (new file, 22 lines)

```dockerfile
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"

FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG ARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*

COPY ./target/${OS}-${ARCH}/signoz /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]
```
```diff
@@ -7,6 +7,7 @@ import (
 	"github.com/SigNoz/signoz/ee/query-service/dao"
 	"github.com/SigNoz/signoz/ee/query-service/integrations/gateway"
+	"github.com/SigNoz/signoz/ee/query-service/interfaces"
 	"github.com/SigNoz/signoz/ee/query-service/license"
 	"github.com/SigNoz/signoz/ee/query-service/usage"
 	"github.com/SigNoz/signoz/pkg/alertmanager"
@@ -28,7 +29,7 @@ import (
 )

 type APIHandlerOptions struct {
-	DataConnector     baseint.Reader
+	DataConnector     interfaces.DataConnector
 	SkipConfig        *basemodel.SkipConfig
 	PreferSpanMetrics bool
 	AppDao            dao.ModelDao
@@ -222,7 +222,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	}

 	// start the usagemanager
-	usageManager, err := usage.New(lm.GetRepo(), serverOptions.SigNoz.TelemetryStore, serverOptions.SigNoz.Zeus)
+	usageManager, err := usage.New(modelDao, lm.GetRepo(), serverOptions.SigNoz.TelemetryStore.ClickhouseDB(), serverOptions.Config.TelemetryStore.Clickhouse.DSN)
 	if err != nil {
 		return nil, err
 	}
```
ee/query-service/interfaces/connector.go (new file, 11 lines)

```go
package interfaces

import (
	baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
)

// DataConnector defines methods for interaction
// with o11y data. for example - clickhouse
type DataConnector interface {
	baseint.Reader
}
```
```diff
@@ -14,8 +14,8 @@ import (
 	"github.com/SigNoz/signoz/pkg/sqlstore"
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
-	"github.com/SigNoz/signoz/pkg/zeus"

+	validate "github.com/SigNoz/signoz/ee/query-service/integrations/signozio"
 	"github.com/SigNoz/signoz/ee/query-service/model"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
 	"github.com/SigNoz/signoz/pkg/query-service/telemetry"
@@ -29,7 +29,6 @@ var validationFrequency = 24 * 60 * time.Minute

 type Manager struct {
 	repo *Repo
-	zeus zeus.Zeus
 	mutex            sync.Mutex
 	validatorRunning bool
 	// end the license validation, this is important to gracefully
@@ -46,7 +45,7 @@ type Manager struct {
 	activeFeatures basemodel.FeatureSet
 }

-func StartManager(db *sqlx.DB, store sqlstore.SQLStore, zeus zeus.Zeus, features ...basemodel.Feature) (*Manager, error) {
+func StartManager(db *sqlx.DB, store sqlstore.SQLStore, features ...basemodel.Feature) (*Manager, error) {
 	if LM != nil {
 		return LM, nil
 	}
@@ -54,7 +53,6 @@ func StartManager(db *sqlx.DB, store sqlstore.SQLStore, zeus zeus.Zeus, features
 	repo := NewLicenseRepo(db, store)
 	m := &Manager{
 		repo: &repo,
-		zeus: zeus,
 	}
 	if err := m.start(features...); err != nil {
 		return m, err
@@ -175,12 +173,14 @@ func (lm *Manager) ValidatorV3(ctx context.Context) {
 }

 func (lm *Manager) RefreshLicense(ctx context.Context) *model.ApiError {
-	license, err := lm.zeus.GetLicense(ctx, lm.activeLicenseV3.Key)
-	if err != nil {
-		return model.BadRequest(errors.Wrap(err, "failed to get license"))
+
+	license, apiError := validate.ValidateLicenseV3(lm.activeLicenseV3.Key)
+	if apiError != nil {
+		zap.L().Error("failed to validate license", zap.Error(apiError.Err))
+		return apiError
 	}

-	err = lm.repo.UpdateLicenseV3(ctx, license)
+	err := lm.repo.UpdateLicenseV3(ctx, license)
 	if err != nil {
 		return model.BadRequest(errors.Wrap(err, "failed to update the new license"))
 	}
@@ -247,9 +247,10 @@ func (lm *Manager) ActivateV3(ctx context.Context, licenseKey string) (licenseRe
 		}
 	}()

-	license, errv2 := lm.zeus.GetLicense(ctx, lm.activeLicenseV3.Key)
-	if errv2 != nil {
-		return nil, model.BadRequest(errors.Wrap(errv2, "failed to get license"))
+	license, apiError := validate.ValidateLicenseV3(licenseKey)
+	if apiError != nil {
+		zap.L().Error("failed to get the license", zap.Error(apiError.Err))
+		return nil, apiError
 	}

 	// insert the new license to the sqlite db
```
ee/query-service/model/license.go (new file, 244 lines)

```go
package model

import (
	"encoding/json"
	"fmt"
	"reflect"
	"time"

	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/pkg/errors"
)

type License struct {
	Key          string    `json:"key" db:"key"`
	ActivationId string    `json:"activationId" db:"activationId"`
	CreatedAt    time.Time `db:"created_at"`

	// PlanDetails contains the encrypted plan info
	PlanDetails string `json:"planDetails" db:"planDetails"`

	// stores parsed license details
	LicensePlan

	FeatureSet basemodel.FeatureSet

	// populated in case license has any errors
	ValidationMessage string `db:"validationMessage"`

	// used only for sending details to front-end
	IsCurrent bool `json:"isCurrent"`
}

func (l *License) MarshalJSON() ([]byte, error) {
	return json.Marshal(&struct {
		Key               string    `json:"key" db:"key"`
		ActivationId      string    `json:"activationId" db:"activationId"`
		ValidationMessage string    `db:"validationMessage"`
		IsCurrent         bool      `json:"isCurrent"`
		PlanKey           string    `json:"planKey"`
		ValidFrom         time.Time `json:"ValidFrom"`
		ValidUntil        time.Time `json:"ValidUntil"`
		Status            string    `json:"status"`
	}{
		Key:               l.Key,
		ActivationId:      l.ActivationId,
		IsCurrent:         l.IsCurrent,
		PlanKey:           l.PlanKey,
		ValidFrom:         time.Unix(l.ValidFrom, 0),
		ValidUntil:        time.Unix(l.ValidUntil, 0),
		Status:            l.Status,
		ValidationMessage: l.ValidationMessage,
	})
}

type LicensePlan struct {
	PlanKey    string `json:"planKey"`
	ValidFrom  int64  `json:"validFrom"`
	ValidUntil int64  `json:"validUntil"`
	Status     string `json:"status"`
}

type Licenses struct {
	TrialStart                   int64     `json:"trialStart"`
	TrialEnd                     int64     `json:"trialEnd"`
	OnTrial                      bool      `json:"onTrial"`
	WorkSpaceBlock               bool      `json:"workSpaceBlock"`
	TrialConvertedToSubscription bool      `json:"trialConvertedToSubscription"`
	GracePeriodEnd               int64     `json:"gracePeriodEnd"`
	Licenses                     []License `json:"licenses"`
}

type SubscriptionServerResp struct {
	Status string   `json:"status"`
	Data   Licenses `json:"data"`
}

type Plan struct {
	Name string `json:"name"`
}

type LicenseDB struct {
	ID   string `json:"id"`
	Key  string `json:"key"`
	Data string `json:"data"`
}

type LicenseV3 struct {
	ID         string
	Key        string
	Data       map[string]interface{}
	PlanName   string
	Features   basemodel.FeatureSet
	Status     string
	IsCurrent  bool
	ValidFrom  int64
	ValidUntil int64
}

func extractKeyFromMapStringInterface[T any](data map[string]interface{}, key string) (T, error) {
	var zeroValue T
	if val, ok := data[key]; ok {
		if value, ok := val.(T); ok {
			return value, nil
		}
		return zeroValue, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zeroValue))
	}
	return zeroValue, fmt.Errorf("%s key is missing", key)
}

func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) {
	var features basemodel.FeatureSet

	// extract id from data
	licenseID, err := extractKeyFromMapStringInterface[string](data, "id")
	if err != nil {
		return nil, err
	}
	delete(data, "id")

	// extract key from data
	licenseKey, err := extractKeyFromMapStringInterface[string](data, "key")
	if err != nil {
		return nil, err
	}
	delete(data, "key")

	// extract status from data
	status, err := extractKeyFromMapStringInterface[string](data, "status")
	if err != nil {
		return nil, err
	}

	planMap, err := extractKeyFromMapStringInterface[map[string]any](data, "plan")
	if err != nil {
		return nil, err
	}

	planName, err := extractKeyFromMapStringInterface[string](planMap, "name")
	if err != nil {
		return nil, err
	}
	// if license status is invalid then default it to basic
	if status == LicenseStatusInvalid {
		planName = PlanNameBasic
	}

	featuresFromZeus := basemodel.FeatureSet{}
	if _features, ok := data["features"]; ok {
		featuresData, err := json.Marshal(_features)
		if err != nil {
			return nil, errors.Wrap(err, "failed to marshal features data")
		}

		if err := json.Unmarshal(featuresData, &featuresFromZeus); err != nil {
			return nil, errors.Wrap(err, "failed to unmarshal features data")
		}
	}

	switch planName {
	case PlanNameEnterprise:
		features = append(features, EnterprisePlan...)
	case PlanNameBasic:
		features = append(features, BasicPlan...)
	default:
		features = append(features, BasicPlan...)
	}

	if len(featuresFromZeus) > 0 {
		for _, feature := range featuresFromZeus {
			exists := false
			for i, existingFeature := range features {
				if existingFeature.Name == feature.Name {
					features[i] = feature // Replace existing feature
					exists = true
					break
				}
			}
			if !exists {
				features = append(features, feature) // Append if it doesn't exist
			}
		}
	}
	data["features"] = features

	_validFrom, err := extractKeyFromMapStringInterface[float64](data, "valid_from")
	if err != nil {
		_validFrom = 0
	}
	validFrom := int64(_validFrom)

	_validUntil, err := extractKeyFromMapStringInterface[float64](data, "valid_until")
	if err != nil {
		_validUntil = 0
	}
	validUntil := int64(_validUntil)

	return &LicenseV3{
		ID:         licenseID,
		Key:        licenseKey,
		Data:       data,
		PlanName:   planName,
		Features:   features,
		ValidFrom:  validFrom,
		ValidUntil: validUntil,
		Status:     status,
	}, nil
}

func NewLicenseV3WithIDAndKey(id string, key string, data map[string]interface{}) (*LicenseV3, error) {
	licenseDataWithIdAndKey := data
	licenseDataWithIdAndKey["id"] = id
	licenseDataWithIdAndKey["key"] = key
	return NewLicenseV3(licenseDataWithIdAndKey)
}

func ConvertLicenseV3ToLicenseV2(l *LicenseV3) *License {
	planKeyFromPlanName, ok := MapOldPlanKeyToNewPlanName[l.PlanName]
	if !ok {
		planKeyFromPlanName = Basic
	}
	return &License{
		Key:               l.Key,
		ActivationId:      "",
		PlanDetails:       "",
		FeatureSet:        l.Features,
		ValidationMessage: "",
		IsCurrent:         l.IsCurrent,
		LicensePlan: LicensePlan{
			PlanKey:    planKeyFromPlanName,
			ValidFrom:  l.ValidFrom,
			ValidUntil: l.ValidUntil,
			Status:     l.Status},
	}
}

type CheckoutRequest struct {
	SuccessURL string `json:"url"`
}

type PortalRequest struct {
	SuccessURL string `json:"url"`
}
```
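`extractKeyFromMapStringInterface` leans on the fact that `encoding/json` decodes every JSON number into `float64`, which is why `valid_from`/`valid_until` are extracted as `float64` and only then truncated to `int64`. A standalone copy of the helper with a usage example (hypothetical `extract` name):

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// Same shape as the helper above: type-assert a map value to T, with
// distinct errors for "missing" and "wrong type".
func extract[T any](data map[string]interface{}, key string) (T, error) {
	var zero T
	val, ok := data[key]
	if !ok {
		return zero, fmt.Errorf("%s key is missing", key)
	}
	typed, ok := val.(T)
	if !ok {
		return zero, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zero))
	}
	return typed, nil
}

func main() {
	var payload map[string]interface{}
	_ = json.Unmarshal([]byte(`{"id":"lic-1","valid_from":1730899309}`), &payload)

	id, _ := extract[string](payload, "id")
	from, _ := extract[float64](payload, "valid_from") // not int64: JSON numbers decode as float64
	fmt.Println(id, int64(from))
}
```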
ee/query-service/model/license_test.go (new file, 170 lines)

```go
package model

import (
	"encoding/json"
	"testing"

	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestNewLicenseV3(t *testing.T) {
	testCases := []struct {
		name     string
		data     []byte
		pass     bool
		expected *LicenseV3
		error    error
	}{
		{
			name:  "Error for missing license id",
			data:  []byte(`{}`),
			pass:  false,
			error: errors.New("id key is missing"),
		},
		{
			name:  "Error for license id not being a valid string",
			data:  []byte(`{"id": 10}`),
			pass:  false,
			error: errors.New("id key is not a valid string"),
		},
		{
			name:  "Error for missing license key",
			data:  []byte(`{"id":"does-not-matter"}`),
			pass:  false,
			error: errors.New("key key is missing"),
		},
		{
			name:  "Error for invalid string license key",
			data:  []byte(`{"id":"does-not-matter","key":10}`),
			pass:  false,
			error: errors.New("key key is not a valid string"),
		},
		{
			name:  "Error for missing license status",
			data:  []byte(`{"id":"does-not-matter", "key": "does-not-matter","category":"FREE"}`),
			pass:  false,
			error: errors.New("status key is missing"),
		},
		{
			name:  "Error for invalid string license status",
			data:  []byte(`{"id":"does-not-matter","key": "does-not-matter", "category":"FREE", "status":10}`),
			pass:  false,
			error: errors.New("status key is not a valid string"),
		},
		{
			name:  "Error for missing license plan",
			data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE"}`),
			pass:  false,
			error: errors.New("plan key is missing"),
		},
		{
			name:  "Error for invalid json license plan",
			data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":10}`),
			pass:  false,
			error: errors.New("plan key is not a valid map[string]interface {}"),
		},
		{
			name:  "Error for invalid license plan",
			data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{}}`),
			pass:  false,
			error: errors.New("name key is missing"),
		},
		{
			name: "Parse the entire license properly",
			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"ENTERPRISE"},"valid_from": 1730899309,"valid_until": -1}`),
			pass: true,
			expected: &LicenseV3{
				ID:  "does-not-matter",
				Key: "does-not-matter-key",
				Data: map[string]interface{}{
					"plan": map[string]interface{}{
						"name": "ENTERPRISE",
					},
					"category":    "FREE",
					"status":      "ACTIVE",
					"valid_from":  float64(1730899309),
					"valid_until": float64(-1),
				},
				PlanName:   PlanNameEnterprise,
				ValidFrom:  1730899309,
				ValidUntil: -1,
				Status:     "ACTIVE",
				IsCurrent:  false,
				Features:   model.FeatureSet{},
			},
		},
		{
			name: "Fallback to basic plan if license status is invalid",
			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INVALID","plan":{"name":"ENTERPRISE"},"valid_from": 1730899309,"valid_until": -1}`),
			pass: true,
			expected: &LicenseV3{
				ID:  "does-not-matter",
				Key: "does-not-matter-key",
				Data: map[string]interface{}{
					"plan": map[string]interface{}{
						"name": "ENTERPRISE",
					},
					"category":    "FREE",
					"status":      "INVALID",
					"valid_from":  float64(1730899309),
					"valid_until": float64(-1),
				},
				PlanName:   PlanNameBasic,
				ValidFrom:  1730899309,
				ValidUntil: -1,
				Status:     "INVALID",
				IsCurrent:  false,
				Features:   model.FeatureSet{},
			},
		},
		{
			name: "fallback states for validFrom and validUntil",
			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"ENTERPRISE"},"valid_from":1234.456,"valid_until":5678.567}`),
			pass: true,
			expected: &LicenseV3{
				ID:  "does-not-matter",
				Key: "does-not-matter-key",
				Data: map[string]interface{}{
					"plan": map[string]interface{}{
						"name": "ENTERPRISE",
					},
					"valid_from":  1234.456,
					"valid_until": 5678.567,
					"category":    "FREE",
					"status":      "ACTIVE",
				},
				PlanName:   PlanNameEnterprise,
				ValidFrom:  1234,
				ValidUntil: 5678,
				Status:     "ACTIVE",
				IsCurrent:  false,
				Features:   model.FeatureSet{},
			},
		},
	}

	for _, tc := range testCases {
		var licensePayload map[string]interface{}
		err := json.Unmarshal(tc.data, &licensePayload)
		require.NoError(t, err)
		license, err := NewLicenseV3(licensePayload)
		if license != nil {
			license.Features = make(model.FeatureSet, 0)
			delete(license.Data, "features")
		}

		if tc.pass {
			require.NoError(t, err)
			require.NotNil(t, license)
			assert.Equal(t, tc.expected, license)
		} else {
			require.Error(t, err)
			assert.EqualError(t, err, tc.error.Error())
			require.Nil(t, license)
		}
	}
}
```
@@ -6,17 +6,15 @@ import (

const SSO = "SSO"
const Basic = "BASIC_PLAN"
const Pro = "PRO_PLAN"
const Enterprise = "ENTERPRISE_PLAN"

var (
	PlanNameEnterprise = "ENTERPRISE"
	PlanNameTeams      = "TEAMS"
	PlanNameBasic      = "BASIC"
)

var (
	MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameTeams: Pro, PlanNameEnterprise: Enterprise}
	MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameEnterprise: Enterprise}
)

var (

@@ -4,21 +4,22 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"regexp"
	"strings"
	"sync/atomic"
	"time"

	"github.com/ClickHouse/clickhouse-go/v2"
	"github.com/go-co-op/gocron"
	"github.com/google/uuid"

	"go.uber.org/zap"

	"github.com/SigNoz/signoz/ee/query-service/dao"
	licenseserver "github.com/SigNoz/signoz/ee/query-service/integrations/signozio"
	"github.com/SigNoz/signoz/ee/query-service/license"
	"github.com/SigNoz/signoz/ee/query-service/model"
	"github.com/SigNoz/signoz/pkg/query-service/utils/encryption"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/zeus"
)

const (
@@ -33,20 +34,35 @@ var (
)

type Manager struct {
	telemetryStore telemetrystore.TelemetryStore
	licenseRepo    *license.Repo
	scheduler      *gocron.Scheduler
	zeus           zeus.Zeus
	clickhouseConn clickhouse.Conn

	licenseRepo *license.Repo

	scheduler *gocron.Scheduler

	modelDao dao.ModelDao

	tenantID string
}

func New(licenseRepo *license.Repo, telemetryStore telemetrystore.TelemetryStore, zeus zeus.Zeus) (*Manager, error) {
	m := &Manager{
		licenseRepo:    licenseRepo,
		telemetryStore: telemetryStore,
		zeus:           zeus,
		scheduler:      gocron.NewScheduler(time.UTC).Every(1).Day().At("00:00"), // send usage every day at 00:00 UTC
func New(modelDao dao.ModelDao, licenseRepo *license.Repo, clickhouseConn clickhouse.Conn, chUrl string) (*Manager, error) {
	hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)
	hostNameRegexMatches := hostNameRegex.FindStringSubmatch(chUrl)

	tenantID := ""
	if len(hostNameRegexMatches) == 2 {
		tenantID = hostNameRegexMatches[1]
		tenantID = strings.TrimSuffix(tenantID, "-clickhouse")
	}

	m := &Manager{
		// repository: repo,
		clickhouseConn: clickhouseConn,
		licenseRepo:    licenseRepo,
		scheduler:      gocron.NewScheduler(time.UTC).Every(1).Day().At("00:00"), // send usage every day at 00:00 UTC
		modelDao:       modelDao,
		tenantID:       tenantID,
	}
	return m, nil
}

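As a side note on the tenant-ID extraction in New above, here is a minimal, self-contained sketch of what the regex does, assuming a ClickHouse DSN of the usual tcp:// shape; the hostname is invented for illustration.

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Illustrative DSN only; real deployments pass their own ClickHouse URL.
	chUrl := "tcp://acme-clickhouse:9000"

	hostNameRegex := regexp.MustCompile(`tcp://(?P<hostname>.*):`)
	matches := hostNameRegex.FindStringSubmatch(chUrl)

	tenantID := ""
	if len(matches) == 2 { // full match plus one capture group
		tenantID = strings.TrimSuffix(matches[1], "-clickhouse")
	}
	fmt.Println(tenantID) // prints "acme"
}
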
@@ -104,7 +120,7 @@ func (lm *Manager) UploadUsage() {

	for _, db := range dbs {
		dbusages := []model.UsageDB{}
		err := lm.telemetryStore.ClickhouseDB().Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
		err := lm.clickhouseConn.Select(ctx, &dbusages, fmt.Sprintf(query, db, db), time.Now().Add(-(24 * time.Hour)))
		if err != nil && !strings.Contains(err.Error(), "doesn't exist") {
			zap.L().Error("failed to get usage from clickhouse", zap.Error(err))
			return
@@ -120,6 +136,17 @@ func (lm *Manager) UploadUsage() {
		return
	}

	zap.L().Info("uploading usage data")

	orgName := ""
	orgNames, orgError := lm.modelDao.GetOrgs(ctx)
	if orgError != nil {
		zap.L().Error("failed to get org data", zap.Error(orgError))
	}
	if len(orgNames) == 1 {
		orgName = orgNames[0].Name
	}

	usagesPayload := []model.Usage{}
	for _, usage := range usages {
		usageDataBytes, err := encryption.Decrypt([]byte(usage.ExporterID[:32]), []byte(usage.Data))
@@ -139,8 +166,8 @@ func (lm *Manager) UploadUsage() {
		usageData.ExporterID = usage.ExporterID
		usageData.Type = usage.Type
		usageData.Tenant = "default"
		usageData.OrgName = "default"
		usageData.TenantId = "default"
		usageData.OrgName = orgName
		usageData.TenantId = lm.tenantID
		usagesPayload = append(usagesPayload, usageData)
	}

@@ -149,7 +176,6 @@ func (lm *Manager) UploadUsage() {
		LicenseKey: key,
		Usage:      usagesPayload,
	}

	lm.UploadUsageWithExponentalBackOff(ctx, payload)
}

@@ -1,33 +0,0 @@
package featuretypes

import "github.com/SigNoz/signoz/pkg/types/featuretypes"

var (
	SingleSignOn = featuretypes.MustNewName("SingleSignOn")
)

func NewEnterpriseRegistry() (featuretypes.Registry, error) {
	enterpriseRegistry, err := featuretypes.NewRegistry(
		&featuretypes.Feature{
			Name:        SingleSignOn,
			Kind:        featuretypes.KindBoolean,
			Description: "Enable single sign on.",
			Stage:       featuretypes.StageStable,
			Default:     true,
		},
	)
	if err != nil {
		return nil, err
	}

	return enterpriseRegistry.MergeOrOverride(featuretypes.MustNewCommunityRegistry()), nil
}

func MustNewEnterpriseRegistry() featuretypes.Registry {
	enterpriseRegistry, err := NewEnterpriseRegistry()
	if err != nil {
		panic(err)
	}

	return enterpriseRegistry
}
@@ -1 +0,0 @@
package licensetypes
@@ -1,245 +0,0 @@
package licensetypes

import "github.com/SigNoz/signoz/pkg/types/featuretypes"

type License struct {
	ID          string
	Key         string
	Contents    map[string]any
	OrgFeatures []*featuretypes.StorableOrgFeature
}

// type License struct {
// 	Key          string    `json:"key" db:"key"`
// 	ActivationId string    `json:"activationId" db:"activationId"`
// 	CreatedAt    time.Time `db:"created_at"`

// 	// PlanDetails contains the encrypted plan info
// 	PlanDetails string `json:"planDetails" db:"planDetails"`

// 	// stores parsed license details
// 	LicensePlan

// 	FeatureSet basemodel.FeatureSet

// 	// populated in case license has any errors
// 	ValidationMessage string `db:"validationMessage"`

// 	// used only for sending details to front-end
// 	IsCurrent bool `json:"isCurrent"`
// }

// func (l *License) MarshalJSON() ([]byte, error) {

// 	return json.Marshal(&struct {
// 		Key               string    `json:"key" db:"key"`
// 		ActivationId      string    `json:"activationId" db:"activationId"`
// 		ValidationMessage string    `db:"validationMessage"`
// 		IsCurrent         bool      `json:"isCurrent"`
// 		PlanKey           string    `json:"planKey"`
// 		ValidFrom         time.Time `json:"ValidFrom"`
// 		ValidUntil        time.Time `json:"ValidUntil"`
// 		Status            string    `json:"status"`
// 	}{
// 		Key:               l.Key,
// 		ActivationId:      l.ActivationId,
// 		IsCurrent:         l.IsCurrent,
// 		PlanKey:           l.PlanKey,
// 		ValidFrom:         time.Unix(l.ValidFrom, 0),
// 		ValidUntil:        time.Unix(l.ValidUntil, 0),
// 		Status:            l.Status,
// 		ValidationMessage: l.ValidationMessage,
// 	})
// }

// type LicensePlan struct {
// 	PlanKey    string `json:"planKey"`
// 	ValidFrom  int64  `json:"validFrom"`
// 	ValidUntil int64  `json:"validUntil"`
// 	Status     string `json:"status"`
// }

// type Licenses struct {
// 	TrialStart                   int64     `json:"trialStart"`
// 	TrialEnd                     int64     `json:"trialEnd"`
// 	OnTrial                      bool      `json:"onTrial"`
// 	WorkSpaceBlock               bool      `json:"workSpaceBlock"`
// 	TrialConvertedToSubscription bool      `json:"trialConvertedToSubscription"`
// 	GracePeriodEnd               int64     `json:"gracePeriodEnd"`
// 	Licenses                     []License `json:"licenses"`
// }

// type SubscriptionServerResp struct {
// 	Status string   `json:"status"`
// 	Data   Licenses `json:"data"`
// }

// type Plan struct {
// 	Name string `json:"name"`
// }

// type LicenseDB struct {
// 	ID   string `json:"id"`
// 	Key  string `json:"key"`
// 	Data string `json:"data"`
// }
// type LicenseV3 struct {
// 	ID         string
// 	Key        string
// 	Data       map[string]interface{}
// 	PlanName   string
// 	Features   basemodel.FeatureSet
// 	Status     string
// 	IsCurrent  bool
// 	ValidFrom  int64
// 	ValidUntil int64
// }

// func extractKeyFromMapStringInterface[T any](data map[string]interface{}, key string) (T, error) {
// 	var zeroValue T
// 	if val, ok := data[key]; ok {
// 		if value, ok := val.(T); ok {
// 			return value, nil
// 		}
// 		return zeroValue, fmt.Errorf("%s key is not a valid %s", key, reflect.TypeOf(zeroValue))
// 	}
// 	return zeroValue, fmt.Errorf("%s key is missing", key)
// }

// func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) {
// 	var features basemodel.FeatureSet

// 	// extract id from data
// 	licenseID, err := extractKeyFromMapStringInterface[string](data, "id")
// 	if err != nil {
// 		return nil, err
// 	}
// 	delete(data, "id")

// 	// extract key from data
// 	licenseKey, err := extractKeyFromMapStringInterface[string](data, "key")
// 	if err != nil {
// 		return nil, err
// 	}
// 	delete(data, "key")

// 	// extract status from data
// 	status, err := extractKeyFromMapStringInterface[string](data, "status")
// 	if err != nil {
// 		return nil, err
// 	}

// 	planMap, err := extractKeyFromMapStringInterface[map[string]any](data, "plan")
// 	if err != nil {
// 		return nil, err
// 	}

// 	planName, err := extractKeyFromMapStringInterface[string](planMap, "name")
// 	if err != nil {
// 		return nil, err
// 	}
// 	// if license status is invalid then default it to basic
// 	if status == LicenseStatusInvalid {
// 		planName = PlanNameBasic
// 	}

// 	featuresFromZeus := basemodel.FeatureSet{}
// 	if _features, ok := data["features"]; ok {
// 		featuresData, err := json.Marshal(_features)
// 		if err != nil {
// 			return nil, errors.Wrap(err, "failed to marshal features data")
// 		}

// 		if err := json.Unmarshal(featuresData, &featuresFromZeus); err != nil {
// 			return nil, errors.Wrap(err, "failed to unmarshal features data")
// 		}
// 	}

// 	switch planName {
// 	case PlanNameTeams:
// 		features = append(features, ProPlan...)
// 	case PlanNameEnterprise:
// 		features = append(features, EnterprisePlan...)
// 	case PlanNameBasic:
// 		features = append(features, BasicPlan...)
// 	default:
// 		features = append(features, BasicPlan...)
// 	}

// 	if len(featuresFromZeus) > 0 {
// 		for _, feature := range featuresFromZeus {
// 			exists := false
// 			for i, existingFeature := range features {
// 				if existingFeature.Name == feature.Name {
// 					features[i] = feature // Replace existing feature
// 					exists = true
// 					break
// 				}
// 			}
// 			if !exists {
// 				features = append(features, feature) // Append if it doesn't exist
// 			}
// 		}
// 	}
// 	data["features"] = features

// 	_validFrom, err := extractKeyFromMapStringInterface[float64](data, "valid_from")
// 	if err != nil {
// 		_validFrom = 0
// 	}
// 	validFrom := int64(_validFrom)

// 	_validUntil, err := extractKeyFromMapStringInterface[float64](data, "valid_until")
// 	if err != nil {
// 		_validUntil = 0
// 	}
// 	validUntil := int64(_validUntil)

// 	return &LicenseV3{
// 		ID:         licenseID,
// 		Key:        licenseKey,
// 		Data:       data,
// 		PlanName:   planName,
// 		Features:   features,
// 		ValidFrom:  validFrom,
// 		ValidUntil: validUntil,
// 		Status:     status,
// 	}, nil

// }

// func NewLicenseV3WithIDAndKey(id string, key string, data map[string]interface{}) (*LicenseV3, error) {
// 	licenseDataWithIdAndKey := data
// 	licenseDataWithIdAndKey["id"] = id
// 	licenseDataWithIdAndKey["key"] = key
// 	return NewLicenseV3(licenseDataWithIdAndKey)
// }

// func ConvertLicenseV3ToLicenseV2(l *LicenseV3) *License {
// 	planKeyFromPlanName, ok := MapOldPlanKeyToNewPlanName[l.PlanName]
// 	if !ok {
// 		planKeyFromPlanName = Basic
// 	}
// 	return &License{
// 		Key:               l.Key,
// 		ActivationId:      "",
// 		PlanDetails:       "",
// 		FeatureSet:        l.Features,
// 		ValidationMessage: "",
// 		IsCurrent:         l.IsCurrent,
// 		LicensePlan: LicensePlan{
// 			PlanKey:    planKeyFromPlanName,
// 			ValidFrom:  l.ValidFrom,
// 			ValidUntil: l.ValidUntil,
// 			Status:     l.Status},
// 	}

// }

// type CheckoutRequest struct {
// 	SuccessURL string `json:"url"`
// }

// type PortalRequest struct {
// 	SuccessURL string `json:"url"`
// }
@@ -1,160 +0,0 @@
package licensetypes

// func TestNewLicenseV3(t *testing.T) {
// 	testCases := []struct {
// 		name     string
// 		data     []byte
// 		pass     bool
// 		expected *LicenseV3
// 		error    error
// 	}{
// 		{
// 			name:  "Error for missing license id",
// 			data:  []byte(`{}`),
// 			pass:  false,
// 			error: errors.New("id key is missing"),
// 		},
// 		{
// 			name:  "Error for license id not being a valid string",
// 			data:  []byte(`{"id": 10}`),
// 			pass:  false,
// 			error: errors.New("id key is not a valid string"),
// 		},
// 		{
// 			name:  "Error for missing license key",
// 			data:  []byte(`{"id":"does-not-matter"}`),
// 			pass:  false,
// 			error: errors.New("key key is missing"),
// 		},
// 		{
// 			name:  "Error for invalid string license key",
// 			data:  []byte(`{"id":"does-not-matter","key":10}`),
// 			pass:  false,
// 			error: errors.New("key key is not a valid string"),
// 		},
// 		{
// 			name:  "Error for missing license status",
// 			data:  []byte(`{"id":"does-not-matter", "key": "does-not-matter","category":"FREE"}`),
// 			pass:  false,
// 			error: errors.New("status key is missing"),
// 		},
// 		{
// 			name:  "Error for invalid string license status",
// 			data:  []byte(`{"id":"does-not-matter","key": "does-not-matter", "category":"FREE", "status":10}`),
// 			pass:  false,
// 			error: errors.New("status key is not a valid string"),
// 		},
// 		{
// 			name:  "Error for missing license plan",
// 			data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE"}`),
// 			pass:  false,
// 			error: errors.New("plan key is missing"),
// 		},
// 		{
// 			name:  "Error for invalid json license plan",
// 			data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":10}`),
// 			pass:  false,
// 			error: errors.New("plan key is not a valid map[string]interface {}"),
// 		},
// 		{
// 			name:  "Error for invalid license plan",
// 			data:  []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{}}`),
// 			pass:  false,
// 			error: errors.New("name key is missing"),
// 		},
// 		{
// 			name: "Parse the entire license properly",
// 			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
// 			pass: true,
// 			expected: &LicenseV3{
// 				ID:  "does-not-matter",
// 				Key: "does-not-matter-key",
// 				Data: map[string]interface{}{
// 					"plan": map[string]interface{}{
// 						"name": "TEAMS",
// 					},
// 					"category":    "FREE",
// 					"status":      "ACTIVE",
// 					"valid_from":  float64(1730899309),
// 					"valid_until": float64(-1),
// 				},
// 				PlanName:   PlanNameTeams,
// 				ValidFrom:  1730899309,
// 				ValidUntil: -1,
// 				Status:     "ACTIVE",
// 				IsCurrent:  false,
// 				Features:   model.FeatureSet{},
// 			},
// 		},
// 		{
// 			name: "Fallback to basic plan if license status is invalid",
// 			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INVALID","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
// 			pass: true,
// 			expected: &LicenseV3{
// 				ID:  "does-not-matter",
// 				Key: "does-not-matter-key",
// 				Data: map[string]interface{}{
// 					"plan": map[string]interface{}{
// 						"name": "TEAMS",
// 					},
// 					"category":    "FREE",
// 					"status":      "INVALID",
// 					"valid_from":  float64(1730899309),
// 					"valid_until": float64(-1),
// 				},
// 				PlanName:   PlanNameBasic,
// 				ValidFrom:  1730899309,
// 				ValidUntil: -1,
// 				Status:     "INVALID",
// 				IsCurrent:  false,
// 				Features:   model.FeatureSet{},
// 			},
// 		},
// 		{
// 			name: "fallback states for validFrom and validUntil",
// 			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from":1234.456,"valid_until":5678.567}`),
// 			pass: true,
// 			expected: &LicenseV3{
// 				ID:  "does-not-matter",
// 				Key: "does-not-matter-key",
// 				Data: map[string]interface{}{
// 					"plan": map[string]interface{}{
// 						"name": "TEAMS",
// 					},
// 					"valid_from":  1234.456,
// 					"valid_until": 5678.567,
// 					"category":    "FREE",
// 					"status":      "ACTIVE",
// 				},
// 				PlanName:   PlanNameTeams,
// 				ValidFrom:  1234,
// 				ValidUntil: 5678,
// 				Status:     "ACTIVE",
// 				IsCurrent:  false,
// 				Features:   model.FeatureSet{},
// 			},
// 		},
// 	}

// 	for _, tc := range testCases {
// 		var licensePayload map[string]interface{}
// 		err := json.Unmarshal(tc.data, &licensePayload)
// 		require.NoError(t, err)
// 		license, err := NewLicenseV3(licensePayload)
// 		if license != nil {
// 			license.Features = make(model.FeatureSet, 0)
// 			delete(license.Data, "features")
// 		}

// 		if tc.pass {
// 			require.NoError(t, err)
// 			require.NotNil(t, license)
// 			assert.Equal(t, tc.expected, license)
// 		} else {
// 			require.Error(t, err)
// 			assert.EqualError(t, err, tc.error.Error())
// 			require.Nil(t, license)
// 		}

// 	}
// }
@@ -1,31 +0,0 @@
package zeus

import (
	"fmt"
	neturl "net/url"
	"sync"

	"github.com/SigNoz/signoz/pkg/zeus"
)

// This will be set via ldflags at build time.
var (
	url          string = "<unset>"
	once         sync.Once
	GlobalConfig zeus.Config
)

// init initializes and validates the Zeus configuration
func init() {
	once.Do(func() {
		parsedURL, err := neturl.Parse(url)
		if err != nil {
			panic(fmt.Errorf("invalid zeus URL: %w", err))
		}

		GlobalConfig = zeus.Config{URL: parsedURL}
		if err := GlobalConfig.Validate(); err != nil {
			panic(fmt.Errorf("invalid zeus config: %w", err))
		}
	})
}
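The `url` variable above is populated through Go's `-X` linker flag, as the comment notes. A minimal, standalone illustration of that mechanism follows; `main.version` is a stand-in variable for this sketch, not the real SigNoz symbol path.

package main

import "fmt"

// Overridden at link time, e.g.:
//   go build -ldflags "-X main.version=1.2.3" .
// Without the flag, the placeholder value survives into the binary,
// which is exactly what the "<unset>" default above guards against.
var version = "<unset>"

func main() {
	fmt.Println(version)
}
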
@@ -1,61 +0,0 @@
package implzeus

import (
	"context"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/http/client"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/types/metertypes"
	"github.com/SigNoz/signoz/pkg/zeus"
)

type Provider struct {
	settings factory.ScopedProviderSettings
	config   zeus.Config
	client   *client.Client
}

func NewProviderFactory() factory.ProviderFactory[zeus.Zeus, zeus.Config] {
	return factory.NewProviderFactory(factory.MustNewName("impl"), func(ctx context.Context, providerSettings factory.ProviderSettings, config zeus.Config) (zeus.Zeus, error) {
		return New(ctx, providerSettings, config)
	})
}

func New(ctx context.Context, providerSettings factory.ProviderSettings, config zeus.Config) (zeus.Zeus, error) {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/zeus/implzeus")

	httpClient := client.New(
		settings.Logger(),
		providerSettings.TracerProvider,
		providerSettings.MeterProvider,
		client.WithRequestResponseLog(true),
		client.WithRetryCount(3),
	)

	return &Provider{
		settings: settings,
		config:   config,
		client:   httpClient,
	}, nil
}

func (provider *Provider) GetLicense(ctx context.Context, key string) (*licensetypes.License, error) {
	return nil, nil
}

func (provider *Provider) GetCheckoutURL(ctx context.Context, key string) (string, error) {
	return "", nil
}

func (provider *Provider) GetPortalURL(ctx context.Context, key string) (string, error) {
	return "", nil
}

func (provider *Provider) GetDeployment(ctx context.Context, key string) ([]byte, error) {
	return nil, nil
}

func (provider *Provider) PutMeters(ctx context.Context, key string, meters metertypes.Meters) error {
	return nil
}
@@ -11,9 +11,12 @@ const logEvent = async (
	rateLimited?: boolean,
): Promise<SuccessResponse<EventSuccessPayloadProps> | ErrorResponse> => {
	try {
		// add tenant_url to attributes
		const { hostname } = window.location;
		const updatedAttributes = { ...attributes, tenant_url: hostname };
		const response = await axios.post('/event', {
			eventName,
			attributes,
			attributes: updatedAttributes,
			eventType: eventType || 'track',
			rateLimited: rateLimited || false, // TODO: Update this once we have a proper way to handle rate limiting
		});

@@ -8,6 +8,5 @@ export enum FeatureKeys {
	PREMIUM_SUPPORT = 'PREMIUM_SUPPORT',
	ANOMALY_DETECTION = 'ANOMALY_DETECTION',
	ONBOARDING_V3 = 'ONBOARDING_V3',
	THIRD_PARTY_API = 'THIRD_PARTY_API',
	TRACE_FUNNELS = 'TRACE_FUNNELS',
}

@@ -684,10 +684,6 @@ function FormAlertRules({

	const isAlertNameMissing = !formInstance.getFieldValue('alert');

	const isAlertAvailableToSave =
		currentQuery.queryType === EQueryType.QUERY_BUILDER &&
		alertType !== AlertTypes.METRICS_BASED_ALERT;

	const onUnitChangeHandler = (value: string): void => {
		setYAxisUnit(value);
		// reset target unit
@@ -860,7 +856,6 @@ function FormAlertRules({
	icon={<SaveOutlined />}
	disabled={
		isAlertNameMissing ||
		isAlertAvailableToSave ||
		!isChannelConfigurationValid ||
		queryStatus === 'error'
	}

@@ -284,16 +284,6 @@ function SideNav(): JSX.Element {
		manageLicenseMenuItem,
	];

	const isApiMonitoringEnabled = featureFlags?.find(
		(flag) => flag.name === FeatureKeys.THIRD_PARTY_API,
	)?.active;

	if (!isApiMonitoringEnabled) {
		updatedMenuItems = updatedMenuItems.filter(
			(item) => item.key !== ROUTES.API_MONITORING,
		);
	}

	if (isCloudUser || isEnterpriseSelfHostedUser) {
		const isOnboardingEnabled =
			featureFlags?.find((feature) => feature.name === FeatureKeys.ONBOARDING)

go.mod (4 changes)
@@ -28,6 +28,7 @@ require (
	github.com/gorilla/mux v1.8.1
	github.com/gorilla/websocket v1.5.0
	github.com/gosimple/slug v1.10.0
	github.com/huandu/go-sqlbuilder v1.35.0
	github.com/jackc/pgx/v5 v5.7.2
	github.com/jmoiron/sqlx v1.3.4
	github.com/json-iterator/go v1.1.12
@@ -91,6 +92,7 @@ require (
	github.com/ClickHouse/ch-go v0.61.5 // indirect
	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
	github.com/andybalholm/brotli v1.1.1 // indirect
	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
	github.com/armon/go-metrics v0.4.1 // indirect
	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
	github.com/aws/aws-sdk-go v1.55.5 // indirect
@@ -150,6 +152,7 @@ require (
	github.com/hashicorp/golang-lru v1.0.2 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/hashicorp/memberlist v0.5.1 // indirect
	github.com/huandu/xstrings v1.4.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jackc/pgpassfile v1.0.0 // indirect
	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
@@ -181,7 +184,6 @@ require (
	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
	github.com/oklog/run v1.1.0 // indirect
	github.com/oklog/ulid v1.3.1 // indirect
	github.com/open-feature/go-sdk v1.14.1 // indirect
	github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.111.0 // indirect
	github.com/paulmach/orb v0.11.1 // indirect
	github.com/pierrec/lz4/v4 v4.1.21 // indirect

go.sum (10 changes)
@@ -113,6 +113,8 @@ github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI=
github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -537,6 +539,12 @@ github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOF
github.com/hjson/hjson-go/v4 v4.0.0 h1:wlm6IYYqHjOdXH1gHev4VoXCaW20HdQAGCxdOEEg2cs=
github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/go-assert v1.1.6 h1:oaAfYxq9KNDi9qswn/6aE0EydfxSa+tWZC1KabNitYs=
github.com/huandu/go-assert v1.1.6/go.mod h1:JuIfbmYG9ykwvuxoJ3V8TB5QP+3+ajIA54Y44TmkMxs=
github.com/huandu/go-sqlbuilder v1.35.0 h1:ESvxFHN8vxCTudY1Vq63zYpU5yJBESn19sf6k4v2T5Q=
github.com/huandu/go-sqlbuilder v1.35.0/go.mod h1:mS0GAtrtW+XL6nM2/gXHRJax2RwSW1TraavWDFAc1JA=
github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
@@ -717,8 +725,6 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/open-feature/go-sdk v1.14.1 h1:jcxjCIG5Up3XkgYwWN5Y/WWfc6XobOhqrIwjyDBsoQo=
github.com/open-feature/go-sdk v1.14.1/go.mod h1:t337k0VB/t/YxJ9S0prT30ISUHwYmUd/jhUZgFcOvGg=
github.com/open-telemetry/opamp-go v0.5.0 h1:2YFbb6G4qBkq3yTRdVb5Nfz9hKHW/ldUyex352e1J7g=
github.com/open-telemetry/opamp-go v0.5.0/go.mod h1:IMdeuHGVc5CjKSu5/oNV0o+UmiXuahoHvoZ4GOmAI9M=
github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.111.0 h1:n1p2DedLvPEN1XEx26s1PR1PCuXTgCY4Eo+kDTq7q0s=

grammar/FilterQuery.g4 (new file, 221 lines)
@@ -0,0 +1,221 @@
grammar FilterQuery;

/*
 * Parser Rules
 */

query
    : expression
      EOF
    ;

// Expression with standard boolean precedence:
// - parentheses > NOT > AND > OR
// - consecutive expressions with no AND/OR => implicit AND
expression
    : orExpression
    ;

// OR expressions
orExpression
    : andExpression ( OR andExpression )*
    ;

// AND expressions + optional chaining with implicit AND if no OR is present
andExpression
    : unaryExpression ( AND unaryExpression | unaryExpression )*
    ;

// A unary expression handles optional NOT
unaryExpression
    : NOT? primary
    ;

// Primary constructs: grouped expressions, a comparison (key op value),
// a function call, or a full-text string
primary
    : LPAREN orExpression RPAREN
    | comparison
    | functionCall
    | fullText
    | key
    ;

/*
 * Comparison-like filters
 *
 * Includes all operators: =, !=, <>, <, <=, >, >=, [NOT] LIKE, [NOT] ILIKE,
 * [NOT] BETWEEN, [NOT] IN, [NOT] EXISTS, [NOT] REGEXP, [NOT] CONTAINS, etc.
 */
comparison
    : key EQUALS value
    | key (NOT_EQUALS | NEQ) value
    | key LT value
    | key LE value
    | key GT value
    | key GE value

    | key (LIKE | ILIKE) value
    | key (NOT_LIKE | NOT_ILIKE) value

    | key BETWEEN value AND value
    | key NOT BETWEEN value AND value

    | key inClause
    | key notInClause

    | key EXISTS
    | key NOT EXISTS

    | key REGEXP value
    | key NOT REGEXP value

    | key CONTAINS value
    | key NOT CONTAINS value
    ;

// in(...) or in[...]
inClause
    : IN LPAREN valueList RPAREN
    | IN LBRACK valueList RBRACK
    ;

notInClause
    : NOT IN LPAREN valueList RPAREN
    | NOT IN LBRACK valueList RBRACK
    ;

// List of values for in(...) or in[...]
valueList
    : value ( COMMA value )*
    ;

// Full-text search: a standalone quoted string is allowed as a "primary"
// e.g. `"Waiting for response" http.status_code=200`
fullText
    : QUOTED_TEXT
    | FREETEXT
    ;

/*
 * Function calls like:
 *   has(payload.user_ids, 123)
 *   hasAny(payload.user_ids, [123, 456])
 *   ...
 */
functionCall
    : (HAS | HASANY | HASALL | HASNONE) LPAREN functionParamList RPAREN
    ;

// Function parameters can be keys, single scalar values, or arrays
functionParamList
    : functionParam ( COMMA functionParam )*
    ;

functionParam
    : key
    | value
    | array
    ;

// An array: [ item1, item2, item3 ]
array
    : LBRACK valueList RBRACK
    ;

/*
 * A 'value' can be a string literal (double or single-quoted),
 * a numeric literal, boolean, or a "bare" token as needed.
 */
value
    : QUOTED_TEXT
    | NUMBER
    | BOOL
    | KEY
    ;

/*
 * A key can include letters, digits, underscores, dots, brackets
 * E.g. service.name, query_log.query_duration_ms, proto.user_objects[].name
 */
key
    : KEY
    ;


/*
 * Lexer Rules
 */

// Common punctuation / symbols
LPAREN : '(' ;
RPAREN : ')' ;
LBRACK : '[' ;
RBRACK : ']' ;
COMMA  : ',' ;

EQUALS     : '=' | '==' ;
NOT_EQUALS : '!=' ;
NEQ        : '<>' ; // alternate not-equals operator
LT         : '<' ;
LE         : '<=' ;
GT         : '>' ;
GE         : '>=' ;

// Operators that are made of multiple keywords
LIKE      : [Ll][Ii][Kk][Ee] ;
NOT_LIKE  : [Nn][Oo][Tt] [ \t]+ [Ll][Ii][Kk][Ee] ;
ILIKE     : [Ii][Ll][Ii][Kk][Ee] ;
NOT_ILIKE : [Nn][Oo][Tt] [ \t]+ [Ii][Ll][Ii][Kk][Ee] ;
BETWEEN   : [Bb][Ee][Tt][Ww][Ee][Ee][Nn] ;
EXISTS    : [Ee][Xx][Ii][Ss][Tt][Ss]? ;
REGEXP    : [Rr][Ee][Gg][Ee][Xx][Pp] ;
CONTAINS  : [Cc][Oo][Nn][Tt][Aa][Ii][Nn][Ss]? ;
IN        : [Ii][Nn] ;

// Boolean logic
NOT : [Nn][Oo][Tt] ;
AND : [Aa][Nn][Dd] ;
OR  : [Oo][Rr] ;

// For easy referencing in function calls
HAS     : [Hh][Aa][Ss] ;
HASANY  : [Hh][Aa][Ss][Aa][Nn][Yy] ;
HASALL  : [Hh][Aa][Ss][Aa][Ll][Ll] ;
HASNONE : [Hh][Aa][Ss][Nn][Oo][Nn][Ee] ;

// Potential boolean constants
BOOL
    : [Tt][Rr][Uu][Ee]
    | [Ff][Aa][Ll][Ss][Ee]
    ;

// Numbers (integer or float). Adjust as needed for your domain.
NUMBER
    : DIGIT+ ( '.' DIGIT+ )?
    ;

// Double/single-quoted text, capturing full text search strings, values, etc.
QUOTED_TEXT
    : ( '"' ( ~["\\] | '\\' . )* '"'     // double-quoted
      | '\'' ( ~['\\] | '\\' . )* '\''   // single-quoted
      )
    ;

// Keys can have letters, digits, underscores, dots, and bracket pairs
// e.g. service.name, service.namespace, db.queries[].query_duration
KEY
    : [a-zA-Z0-9_] [a-zA-Z0-9_.[\]]*
    ;

// Ignore whitespace
WS
    : [ \t\r\n]+ -> skip
    ;

// Digits used by NUMBER
fragment DIGIT
    : [0-9]
    ;

FREETEXT : (~[ \t\r\n=()'"<>![\]])+ ;
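To make the grammar concrete, here are a few query strings it is designed to accept, listed from Go. The keys and values are invented for illustration, and the sketch only prints the inputs; it does not invoke the generated parser.

package main

import "fmt"

func main() {
	// Each string maps onto a rule above: comparison chained with AND,
	// BETWEEN, NOT with a parenthesised group, full-text plus implicit
	// AND, and a functionCall.
	queries := []string{
		`service.name = "checkout" AND http.status_code >= 500`,
		`duration_ms BETWEEN 100 AND 250`,
		`NOT (env = 'dev' OR env = 'test')`,
		`"Waiting for response" http.status_code = 200`,
		`has(payload.user_ids, 123)`,
	}
	for _, q := range queries {
		fmt.Println(q)
	}
}
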
@@ -1 +0,0 @@
package flagger
@@ -1,35 +0,0 @@
package flagger

import "github.com/SigNoz/signoz/pkg/factory"

var _ factory.Config = Config{}

type Config struct {
	Provider string `json:"provider"`
	Memory   Memory `json:"memory"`
}

type Memory struct {
	Boolean Boolean `json:"boolean"`
}

type Boolean struct {
	Enabled  []string `json:"enabled"`
	Disabled []string `json:"disabled"`
}

func NewConfigFactory() factory.ConfigFactory {
	return factory.NewConfigFactory(factory.MustNewName("flagger"), newConfig)
}

func newConfig() factory.Config {
	return &Config{
		Provider: "memory",
		Memory:   Memory{},
	}
}

func (c Config) Validate() error {
	return nil
}
@@ -1,27 +0,0 @@
package flagger

import (
	"github.com/open-feature/go-sdk/openfeature"
)

type Provider = openfeature.FeatureProvider

type FlaggerHook = openfeature.Hook

type Flagger = openfeature.IClient

type flagger struct {
	*openfeature.Client
}

func New(provider Provider, hooks ...FlaggerHook) (Flagger, error) {
	client := openfeature.NewClient("signoz")

	if err := openfeature.SetNamedProviderAndWait("signoz", provider); err != nil {
		return nil, err
	}

	client.AddHooks(hooks...)

	return &flagger{Client: client}, nil
}
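A hedged usage sketch for the wrapper above: it wires a provider into the named OpenFeature client and evaluates a boolean flag. `openfeature.NoopProvider` stands in for the project's own memory provider, and the flag name is borrowed from the registry later in this diff.

package main

import (
	"context"
	"fmt"

	"github.com/open-feature/go-sdk/openfeature"
)

func main() {
	// Register a provider under the "signoz" domain, mirroring what
	// flagger.New does before handing back the client.
	if err := openfeature.SetNamedProviderAndWait("signoz", openfeature.NoopProvider{}); err != nil {
		panic(err)
	}
	client := openfeature.NewClient("signoz")

	enabled, err := client.BooleanValue(
		context.Background(), "UseTracesNewSchema", false, openfeature.EvaluationContext{},
	)
	if err != nil {
		fmt.Println("evaluation error:", err)
	}
	fmt.Println("UseTracesNewSchema:", enabled)
}
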
@@ -1,241 +0,0 @@
package memoryprovider

import (
	"context"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/flagger"
	"github.com/SigNoz/signoz/pkg/types/featuretypes"
	"github.com/open-feature/go-sdk/openfeature"
)

type provider struct {
	config        flagger.Config
	settings      factory.ScopedProviderSettings
	featureValues map[featuretypes.Name]featuretypes.FeatureValue
	registry      featuretypes.Registry
}

func NewFactory(registry featuretypes.Registry) factory.ProviderFactory[flagger.Provider, flagger.Config] {
	return factory.NewProviderFactory(factory.MustNewName("memory"), func(ctx context.Context, providerSettings factory.ProviderSettings, config flagger.Config) (flagger.Provider, error) {
		return New(ctx, providerSettings, config, registry)
	})
}

func New(ctx context.Context, providerSettings factory.ProviderSettings, config flagger.Config, registry featuretypes.Registry) (flagger.Provider, error) {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/flagger/memoryprovider")

	featureValues := make(map[featuretypes.Name]featuretypes.FeatureValue)
	for _, flag := range config.Memory.Boolean.Enabled {
		name, err := featuretypes.NewName(flag)
		if err != nil {
			settings.Logger().Error("invalid flag name encountered", "flag", flag, "error", err)
			continue
		}

		featureValues[name] = featuretypes.FeatureValue{
			Name:    name,
			Variant: featuretypes.KindBooleanVariantEnabled,
		}
	}

	for _, flag := range config.Memory.Boolean.Disabled {
		name, err := featuretypes.NewName(flag)
		if err != nil {
			settings.Logger().Error("invalid flag name encountered", "flag", flag, "error", err)
			continue
		}

		if _, ok := featureValues[name]; ok {
			settings.Logger().Error("flag already exists and has been enabled", "flag", flag)
			continue
		}

		featureValues[name] = featuretypes.FeatureValue{
			Name:    name,
			Variant: featuretypes.KindBooleanVariantDisabled,
		}
	}

	return &provider{
		config:        config,
		settings:      settings,
		featureValues: featureValues,
		registry:      registry,
	}, nil
}

func (provider *provider) Metadata() openfeature.Metadata {
	return openfeature.Metadata{
		Name: "memory",
	}
}

func (provider *provider) BooleanEvaluation(ctx context.Context, flag string, defaultValue bool, evalCtx openfeature.FlattenedContext) openfeature.BoolResolutionDetail {
	feature, detail, err := provider.registry.Get(flag)
	if err != nil {
		return openfeature.BoolResolutionDetail{
			Value:                    defaultValue,
			ProviderResolutionDetail: detail,
		}
	}

	if featureValue, ok := provider.featureValues[feature.Name]; ok {
		value, detail, err := featuretypes.GetVariantValue[bool](feature, featureValue.Variant)
		if err != nil {
			return openfeature.BoolResolutionDetail{
				Value:                    defaultValue,
				ProviderResolutionDetail: detail,
			}
		}

		return openfeature.BoolResolutionDetail{
			Value:                    value,
			ProviderResolutionDetail: detail,
		}
	}

	return openfeature.BoolResolutionDetail{
		Value: defaultValue,
		ProviderResolutionDetail: openfeature.ProviderResolutionDetail{
			Reason:  openfeature.StaticReason,
			Variant: feature.DefaultVariant,
		},
	}
}

func (provider *provider) StringEvaluation(ctx context.Context, flag string, defaultValue string, evalCtx openfeature.FlattenedContext) openfeature.StringResolutionDetail {
	feature, detail, err := provider.registry.Get(flag)
	if err != nil {
		return openfeature.StringResolutionDetail{
			Value:                    defaultValue,
			ProviderResolutionDetail: detail,
		}
	}

	if featureValue, ok := provider.featureValues[feature.Name]; ok {
		value, detail, err := featuretypes.GetVariantValue[string](feature, featureValue.Variant)
		if err != nil {
			return openfeature.StringResolutionDetail{
				Value:                    defaultValue,
				ProviderResolutionDetail: detail,
			}
		}

		return openfeature.StringResolutionDetail{
			Value:                    value,
			ProviderResolutionDetail: detail,
		}
	}

	return openfeature.StringResolutionDetail{
		Value: defaultValue,
		ProviderResolutionDetail: openfeature.ProviderResolutionDetail{
			Reason:  openfeature.StaticReason,
			Variant: feature.DefaultVariant,
		},
	}
}

func (provider *provider) FloatEvaluation(ctx context.Context, flag string, defaultValue float64, evalCtx openfeature.FlattenedContext) openfeature.FloatResolutionDetail {
	feature, detail, err := provider.registry.Get(flag)
	if err != nil {
		return openfeature.FloatResolutionDetail{
			Value:                    defaultValue,
			ProviderResolutionDetail: detail,
		}
	}

	if featureValue, ok := provider.featureValues[feature.Name]; ok {
		value, detail, err := featuretypes.GetVariantValue[float64](feature, featureValue.Variant)
		if err != nil {
			return openfeature.FloatResolutionDetail{
				Value:                    defaultValue,
				ProviderResolutionDetail: detail,
			}
		}

		return openfeature.FloatResolutionDetail{
			Value:                    value,
			ProviderResolutionDetail: detail,
		}
	}

	return openfeature.FloatResolutionDetail{
		Value: defaultValue,
		ProviderResolutionDetail: openfeature.ProviderResolutionDetail{
			Reason:  openfeature.StaticReason,
			Variant: feature.DefaultVariant,
		},
	}
}

func (provider *provider) IntEvaluation(ctx context.Context, flag string, defaultValue int64, evalCtx openfeature.FlattenedContext) openfeature.IntResolutionDetail {
	feature, detail, err := provider.registry.Get(flag)
	if err != nil {
		return openfeature.IntResolutionDetail{
			Value:                    defaultValue,
			ProviderResolutionDetail: detail,
		}
	}

	if featureValue, ok := provider.featureValues[feature.Name]; ok {
		value, detail, err := featuretypes.GetVariantValue[int64](feature, featureValue.Variant)
		if err != nil {
			return openfeature.IntResolutionDetail{
				Value:                    defaultValue,
				ProviderResolutionDetail: detail,
			}
		}

		return openfeature.IntResolutionDetail{
			Value:                    value,
			ProviderResolutionDetail: detail,
		}
	}

	return openfeature.IntResolutionDetail{
		Value: defaultValue,
		ProviderResolutionDetail: openfeature.ProviderResolutionDetail{
			Reason:  openfeature.StaticReason,
			Variant: feature.DefaultVariant,
		},
	}
}

func (provider *provider) ObjectEvaluation(ctx context.Context, flag string, defaultValue interface{}, evalCtx openfeature.FlattenedContext) openfeature.InterfaceResolutionDetail {
	feature, detail, err := provider.registry.Get(flag)
	if err != nil {
		return openfeature.InterfaceResolutionDetail{
			Value:                    defaultValue,
			ProviderResolutionDetail: detail,
		}
	}

	if featureValue, ok := provider.featureValues[feature.Name]; ok {
		value, detail, err := featuretypes.GetVariantValue[interface{}](feature, featureValue.Variant)
		if err != nil {
			return openfeature.InterfaceResolutionDetail{
				Value:                    defaultValue,
				ProviderResolutionDetail: detail,
			}
		}

		return openfeature.InterfaceResolutionDetail{
			Value:                    value,
			ProviderResolutionDetail: detail,
		}
	}

	return openfeature.InterfaceResolutionDetail{
		Value: defaultValue,
		ProviderResolutionDetail: openfeature.ProviderResolutionDetail{
			Reason:  openfeature.StaticReason,
			Variant: feature.DefaultVariant,
		},
	}
}

func (provider *provider) Hooks() []openfeature.Hook {
	return []openfeature.Hook{}
}
@@ -1,34 +0,0 @@
package flagger

import "github.com/SigNoz/signoz/pkg/types/featuretypes"

var (
	FeatureUseTracesNewSchema = featuretypes.MustNewName("UseTracesNewSchema")
	FeatureUseLogsNewSchema   = featuretypes.MustNewName("UseLogsNewSchema")
)

func MustNewRegistry() featuretypes.Registry {
	registry, err := featuretypes.NewRegistry(
		&featuretypes.Feature{
			Name:           FeatureUseTracesNewSchema,
			Kind:           featuretypes.KindBoolean,
			Description:    "Use new traces schema.",
			Stage:          featuretypes.StageStable,
			DefaultVariant: featuretypes.KindBooleanVariantDisabled,
			Variants:       featuretypes.NewBooleanFeatureVariants(),
		},
		&featuretypes.Feature{
			Name:           FeatureUseLogsNewSchema,
			Kind:           featuretypes.KindBoolean,
			Description:    "Use new logs schema.",
			Stage:          featuretypes.StageStable,
			DefaultVariant: featuretypes.KindBooleanVariantDisabled,
			Variants:       featuretypes.NewBooleanFeatureVariants(),
		},
	)
	if err != nil {
		panic(err)
	}

	return registry
}
@@ -17,7 +17,7 @@ type Client struct {
	netc *http.Client
}

func New(logger *slog.Logger, tracerProvider trace.TracerProvider, meterProvider metric.MeterProvider, opts ...Option) *Client {
func New(logger *slog.Logger, tracerProvider trace.TracerProvider, meterProvider metric.MeterProvider, opts ...Option) (*Client, error) {
	clientOpts := options{
		retryCount:         3,
		requestResponseLog: false,
@@ -46,7 +46,7 @@ func New(logger *slog.Logger, tracerProvider trace.TracerProvider, meterProvider
	return &Client{
		netc: netc,
		c:    c,
	}
	}, nil
}

func (c *Client) Do(request *http.Request) (*http.Response, error) {

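Since New now returns an error alongside the client, every call site gains an extra check. A hedged sketch of the updated call shape follows, using no-op OpenTelemetry providers; the option names come from usage elsewhere in this diff, while the noop import paths are an assumption about the OpenTelemetry SDK layout.

package main

import (
	"log/slog"
	"os"

	"github.com/SigNoz/signoz/pkg/http/client"
	metricnoop "go.opentelemetry.io/otel/metric/noop"
	tracenoop "go.opentelemetry.io/otel/trace/noop"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// The second return value must now be handled.
	c, err := client.New(
		logger,
		tracenoop.NewTracerProvider(),
		metricnoop.NewMeterProvider(),
		client.WithRetryCount(3),
	)
	if err != nil {
		panic(err)
	}
	_ = c // ready for c.Do(...)
}
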
@@ -1,16 +0,0 @@
package licensing

// type API interface {
// 	GetLicenses(context.Context, valuer.UUID, licensetypes.GettableLicenseParams) (licensetypes.GettableLicenses, error)
// }

// func Get(){
// 	//
// 	return 501
// }

// func Put() {
// 	//400 bad request

// 	//501
// }
@@ -1,17 +0,0 @@
package licensing

import "time"

type Config struct {
	Provider string `mapstructure:"provider"`

	PollingConfig PollingConfig `mapstructure:"polling"`
}

type PollingConfig struct {
	Interval time.Duration `mapstructure:"interval"`
}

func (c Config) Validate() error {
	return nil
}
@@ -1,27 +0,0 @@
package licensing

import (
	"context"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

var (
	ErrCodeLicensingServerNotFound = errors.MustNewCode("licensing_server_not_found")
)

type Licensing interface {
	factory.Service

	// GetLicenses gets the licenses for the organization.
	GetLicenses(context.Context, valuer.UUID, licensetypes.GettableLicenseParams) (licensetypes.GettableLicenses, error)

	// GetLatestLicense gets the latest license for the organization.
	GetLatestLicense(context.Context, valuer.UUID) (licensetypes.License, error)

	// SetLicense sets the license for the organization.
	SetLicense(context.Context, valuer.UUID, string) error
}
@@ -1,50 +0,0 @@
package nooplicensing

import (
	"context"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type provider struct {
	stopC   chan struct{}
	license licensetypes.License
}

func NewFactory() factory.ProviderFactory[licensing.Licensing, licensing.Config] {
	return factory.NewProviderFactory(factory.MustNewName("noop"), func(ctx context.Context, providerSettings factory.ProviderSettings, config licensing.Config) (licensing.Licensing, error) {
		return New(ctx, providerSettings, config)
	})
}

func New(ctx context.Context, providerSettings factory.ProviderSettings, config licensing.Config) (licensing.Licensing, error) {
	return &provider{
		stopC:   make(chan struct{}),
		license: licensetypes.NewNoop(),
	}, nil
}

func (provider *provider) Start(ctx context.Context) error {
	<-provider.stopC
	return nil
}

func (provider *provider) GetLicenses(ctx context.Context, orgID valuer.UUID, params licensetypes.GettableLicenseParams) (licensetypes.GettableLicenses, error) {
	return licensetypes.GettableLicenses{provider.license}, nil
}

func (provider *provider) GetLatestLicense(ctx context.Context, orgID valuer.UUID) (licensetypes.License, error) {
	return provider.license, nil
}

func (provider *provider) SetLicense(ctx context.Context, orgID valuer.UUID, key string) error {
	return nil
}

func (provider *provider) Stop(ctx context.Context) error {
	close(provider.stopC)
	return nil
}
pkg/parser/grammar/FilterQuery.interp (new file, 96 lines; diff suppressed because one or more lines are too long)
pkg/parser/grammar/FilterQuery.tokens (new file, 45 lines)
@@ -0,0 +1,45 @@
LPAREN=1
RPAREN=2
LBRACK=3
RBRACK=4
COMMA=5
EQUALS=6
NOT_EQUALS=7
NEQ=8
LT=9
LE=10
GT=11
GE=12
LIKE=13
NOT_LIKE=14
ILIKE=15
NOT_ILIKE=16
BETWEEN=17
EXISTS=18
REGEXP=19
CONTAINS=20
IN=21
NOT=22
AND=23
OR=24
HAS=25
HASANY=26
HASALL=27
HASNONE=28
BOOL=29
NUMBER=30
QUOTED_TEXT=31
KEY=32
WS=33
FREETEXT=34
'('=1
')'=2
'['=3
']'=4
','=5
'!='=7
'<>'=8
'<'=9
'<='=10
'>'=11
'>='=12
pkg/parser/grammar/FilterQueryLexer.interp (new file, 120 lines; diff suppressed because one or more lines are too long)
pkg/parser/grammar/FilterQueryLexer.tokens (new file, 45 lines)
@@ -0,0 +1,45 @@
LPAREN=1
RPAREN=2
LBRACK=3
RBRACK=4
COMMA=5
EQUALS=6
NOT_EQUALS=7
NEQ=8
LT=9
LE=10
GT=11
GE=12
LIKE=13
NOT_LIKE=14
ILIKE=15
NOT_ILIKE=16
BETWEEN=17
EXISTS=18
REGEXP=19
CONTAINS=20
IN=21
NOT=22
AND=23
OR=24
HAS=25
HASANY=26
HASALL=27
HASNONE=28
BOOL=29
NUMBER=30
QUOTED_TEXT=31
KEY=32
WS=33
FREETEXT=34
'('=1
')'=2
'['=3
']'=4
','=5
'!='=7
'<>'=8
'<'=9
'<='=10
'>'=11
'>='=12
pkg/parser/grammar/filterquery_base_listener.go (new file, 124 lines)
@@ -0,0 +1,124 @@
|
||||
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

// BaseFilterQueryListener is a complete listener for a parse tree produced by FilterQueryParser.
type BaseFilterQueryListener struct{}

var _ FilterQueryListener = &BaseFilterQueryListener{}

// VisitTerminal is called when a terminal node is visited.
func (s *BaseFilterQueryListener) VisitTerminal(node antlr.TerminalNode) {}

// VisitErrorNode is called when an error node is visited.
func (s *BaseFilterQueryListener) VisitErrorNode(node antlr.ErrorNode) {}

// EnterEveryRule is called when any rule is entered.
func (s *BaseFilterQueryListener) EnterEveryRule(ctx antlr.ParserRuleContext) {}

// ExitEveryRule is called when any rule is exited.
func (s *BaseFilterQueryListener) ExitEveryRule(ctx antlr.ParserRuleContext) {}

// EnterQuery is called when production query is entered.
func (s *BaseFilterQueryListener) EnterQuery(ctx *QueryContext) {}

// ExitQuery is called when production query is exited.
func (s *BaseFilterQueryListener) ExitQuery(ctx *QueryContext) {}

// EnterExpression is called when production expression is entered.
func (s *BaseFilterQueryListener) EnterExpression(ctx *ExpressionContext) {}

// ExitExpression is called when production expression is exited.
func (s *BaseFilterQueryListener) ExitExpression(ctx *ExpressionContext) {}

// EnterOrExpression is called when production orExpression is entered.
func (s *BaseFilterQueryListener) EnterOrExpression(ctx *OrExpressionContext) {}

// ExitOrExpression is called when production orExpression is exited.
func (s *BaseFilterQueryListener) ExitOrExpression(ctx *OrExpressionContext) {}

// EnterAndExpression is called when production andExpression is entered.
func (s *BaseFilterQueryListener) EnterAndExpression(ctx *AndExpressionContext) {}

// ExitAndExpression is called when production andExpression is exited.
func (s *BaseFilterQueryListener) ExitAndExpression(ctx *AndExpressionContext) {}

// EnterUnaryExpression is called when production unaryExpression is entered.
func (s *BaseFilterQueryListener) EnterUnaryExpression(ctx *UnaryExpressionContext) {}

// ExitUnaryExpression is called when production unaryExpression is exited.
func (s *BaseFilterQueryListener) ExitUnaryExpression(ctx *UnaryExpressionContext) {}

// EnterPrimary is called when production primary is entered.
func (s *BaseFilterQueryListener) EnterPrimary(ctx *PrimaryContext) {}

// ExitPrimary is called when production primary is exited.
func (s *BaseFilterQueryListener) ExitPrimary(ctx *PrimaryContext) {}

// EnterComparison is called when production comparison is entered.
func (s *BaseFilterQueryListener) EnterComparison(ctx *ComparisonContext) {}

// ExitComparison is called when production comparison is exited.
func (s *BaseFilterQueryListener) ExitComparison(ctx *ComparisonContext) {}

// EnterInClause is called when production inClause is entered.
func (s *BaseFilterQueryListener) EnterInClause(ctx *InClauseContext) {}

// ExitInClause is called when production inClause is exited.
func (s *BaseFilterQueryListener) ExitInClause(ctx *InClauseContext) {}

// EnterNotInClause is called when production notInClause is entered.
func (s *BaseFilterQueryListener) EnterNotInClause(ctx *NotInClauseContext) {}

// ExitNotInClause is called when production notInClause is exited.
func (s *BaseFilterQueryListener) ExitNotInClause(ctx *NotInClauseContext) {}

// EnterValueList is called when production valueList is entered.
func (s *BaseFilterQueryListener) EnterValueList(ctx *ValueListContext) {}

// ExitValueList is called when production valueList is exited.
func (s *BaseFilterQueryListener) ExitValueList(ctx *ValueListContext) {}

// EnterFullText is called when production fullText is entered.
func (s *BaseFilterQueryListener) EnterFullText(ctx *FullTextContext) {}

// ExitFullText is called when production fullText is exited.
func (s *BaseFilterQueryListener) ExitFullText(ctx *FullTextContext) {}

// EnterFunctionCall is called when production functionCall is entered.
func (s *BaseFilterQueryListener) EnterFunctionCall(ctx *FunctionCallContext) {}

// ExitFunctionCall is called when production functionCall is exited.
func (s *BaseFilterQueryListener) ExitFunctionCall(ctx *FunctionCallContext) {}

// EnterFunctionParamList is called when production functionParamList is entered.
func (s *BaseFilterQueryListener) EnterFunctionParamList(ctx *FunctionParamListContext) {}

// ExitFunctionParamList is called when production functionParamList is exited.
func (s *BaseFilterQueryListener) ExitFunctionParamList(ctx *FunctionParamListContext) {}

// EnterFunctionParam is called when production functionParam is entered.
func (s *BaseFilterQueryListener) EnterFunctionParam(ctx *FunctionParamContext) {}

// ExitFunctionParam is called when production functionParam is exited.
func (s *BaseFilterQueryListener) ExitFunctionParam(ctx *FunctionParamContext) {}

// EnterArray is called when production array is entered.
func (s *BaseFilterQueryListener) EnterArray(ctx *ArrayContext) {}

// ExitArray is called when production array is exited.
func (s *BaseFilterQueryListener) ExitArray(ctx *ArrayContext) {}

// EnterValue is called when production value is entered.
func (s *BaseFilterQueryListener) EnterValue(ctx *ValueContext) {}

// ExitValue is called when production value is exited.
func (s *BaseFilterQueryListener) ExitValue(ctx *ValueContext) {}

// EnterKey is called when production key is entered.
func (s *BaseFilterQueryListener) EnterKey(ctx *KeyContext) {}

// ExitKey is called when production key is exited.
func (s *BaseFilterQueryListener) ExitKey(ctx *KeyContext) {}
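A consumer-side sketch (not part of the diff): embedding the generated base listener and overriding only the production of interest. It assumes the conventional NewFilterQueryParser constructor and Query() entry rule from the parser file whose diff is suppressed below, and an import path matching this package's location; both are assumptions, not confirmed by the diff.

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"

	parser "github.com/SigNoz/signoz/pkg/parser/grammar" // assumed import path
)

// keyCollector records every key production seen during the walk; all other
// callbacks fall through to the no-op methods of BaseFilterQueryListener.
type keyCollector struct {
	parser.BaseFilterQueryListener
	keys []string
}

func (l *keyCollector) EnterKey(ctx *parser.KeyContext) {
	l.keys = append(l.keys, ctx.GetText())
}

func main() {
	input := antlr.NewInputStream(`service.name = "api" AND duration > 100`)
	lexer := parser.NewFilterQueryLexer(input)
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	p := parser.NewFilterQueryParser(tokens) // assumed constructor from the suppressed parser file
	collector := &keyCollector{}
	antlr.ParseTreeWalkerDefault.Walk(collector, p.Query())
	fmt.Println(collector.keys) // expected to print the keys, e.g. [service.name duration]
}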
77
pkg/parser/grammar/filterquery_base_visitor.go
Normal file
@@ -0,0 +1,77 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

type BaseFilterQueryVisitor struct {
	*antlr.BaseParseTreeVisitor
}

func (v *BaseFilterQueryVisitor) VisitQuery(ctx *QueryContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitExpression(ctx *ExpressionContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitOrExpression(ctx *OrExpressionContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitAndExpression(ctx *AndExpressionContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitUnaryExpression(ctx *UnaryExpressionContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitPrimary(ctx *PrimaryContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitComparison(ctx *ComparisonContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitInClause(ctx *InClauseContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitNotInClause(ctx *NotInClauseContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitValueList(ctx *ValueListContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFullText(ctx *FullTextContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFunctionCall(ctx *FunctionCallContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFunctionParamList(ctx *FunctionParamListContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFunctionParam(ctx *FunctionParamContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitArray(ctx *ArrayContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitValue(ctx *ValueContext) interface{} {
	return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitKey(ctx *KeyContext) interface{} {
	return v.VisitChildren(ctx)
}
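A visitor-side sketch, with the same assumed import as the listener sketch above. One antlr4-go quirk is worth noting: the embedded BaseParseTreeVisitor's VisitChildren returns nil rather than recursing, so a concrete visitor has to drive traversal itself; the walk helper below is one hypothetical way to do that.

// exprPrinter overrides the base no-op for a single production.
type exprPrinter struct {
	parser.BaseFilterQueryVisitor
}

func (v *exprPrinter) VisitComparison(ctx *parser.ComparisonContext) interface{} {
	fmt.Println("comparison:", ctx.GetText())
	return nil
}

// walk drives traversal manually: each node is dispatched through Accept so
// that overridden Visit* methods fire, then its children are visited in turn.
func (v *exprPrinter) walk(node antlr.Tree) {
	if pt, ok := node.(antlr.ParseTree); ok {
		pt.Accept(v)
	}
	for _, child := range node.GetChildren() {
		v.walk(child)
	}
}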
271
pkg/parser/grammar/filterquery_lexer.go
Normal file
@@ -0,0 +1,271 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser

import (
	"fmt"
	"github.com/antlr4-go/antlr/v4"
	"sync"
	"unicode"
)

// Suppress unused import error
var _ = fmt.Printf
var _ = sync.Once{}
var _ = unicode.IsLetter

type FilterQueryLexer struct {
	*antlr.BaseLexer
	channelNames []string
	modeNames    []string
	// TODO: EOF string
}

var FilterQueryLexerLexerStaticData struct {
	once                   sync.Once
	serializedATN          []int32
	ChannelNames           []string
	ModeNames              []string
	LiteralNames           []string
	SymbolicNames          []string
	RuleNames              []string
	PredictionContextCache *antlr.PredictionContextCache
	atn                    *antlr.ATN
	decisionToDFA          []*antlr.DFA
}

func filterquerylexerLexerInit() {
	staticData := &FilterQueryLexerLexerStaticData
	staticData.ChannelNames = []string{
		"DEFAULT_TOKEN_CHANNEL", "HIDDEN",
	}
	staticData.ModeNames = []string{
		"DEFAULT_MODE",
	}
	staticData.LiteralNames = []string{
		"", "'('", "')'", "'['", "']'", "','", "", "'!='", "'<>'", "'<'", "'<='",
		"'>'", "'>='",
	}
	staticData.SymbolicNames = []string{
		"", "LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
		"NEQ", "LT", "LE", "GT", "GE", "LIKE", "NOT_LIKE", "ILIKE", "NOT_ILIKE",
		"BETWEEN", "EXISTS", "REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR",
		"HAS", "HASANY", "HASALL", "HASNONE", "BOOL", "NUMBER", "QUOTED_TEXT",
		"KEY", "WS", "FREETEXT",
	}
	staticData.RuleNames = []string{
		"LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
		"NEQ", "LT", "LE", "GT", "GE", "LIKE", "NOT_LIKE", "ILIKE", "NOT_ILIKE",
		"BETWEEN", "EXISTS", "REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR",
		"HAS", "HASANY", "HASALL", "HASNONE", "BOOL", "NUMBER", "QUOTED_TEXT",
		"KEY", "WS", "DIGIT", "FREETEXT",
	}
	staticData.PredictionContextCache = antlr.NewPredictionContextCache()
	staticData.serializedATN = []int32{
		4, 0, 34, 280, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
		4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
		10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
		7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
		20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
		2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
		31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 1, 0, 1, 0, 1, 1,
		1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 3, 5, 85, 8,
		5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1,
		10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13,
		1, 13, 1, 13, 1, 13, 4, 13, 112, 8, 13, 11, 13, 12, 13, 113, 1, 13, 1,
		13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15,
		1, 15, 1, 15, 1, 15, 4, 15, 131, 8, 15, 11, 15, 12, 15, 132, 1, 15, 1,
		15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16,
		1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 3, 17, 155, 8,
		17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19,
		1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 172, 8, 19, 1, 20, 1, 20, 1,
		20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23,
		1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1,
		25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27,
		1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1,
		28, 1, 28, 1, 28, 1, 28, 1, 28, 3, 28, 223, 8, 28, 1, 29, 4, 29, 226, 8,
		29, 11, 29, 12, 29, 227, 1, 29, 1, 29, 4, 29, 232, 8, 29, 11, 29, 12, 29,
		233, 3, 29, 236, 8, 29, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 242, 8, 30,
		10, 30, 12, 30, 245, 9, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 252,
		8, 30, 10, 30, 12, 30, 255, 9, 30, 1, 30, 3, 30, 258, 8, 30, 1, 31, 1,
		31, 5, 31, 262, 8, 31, 10, 31, 12, 31, 265, 9, 31, 1, 32, 4, 32, 268, 8,
		32, 11, 32, 12, 32, 269, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 4, 34, 277,
		8, 34, 11, 34, 12, 34, 278, 0, 0, 35, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11,
		6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15,
		31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24,
		49, 25, 51, 26, 53, 27, 55, 28, 57, 29, 59, 30, 61, 31, 63, 32, 65, 33,
		67, 0, 69, 34, 1, 0, 29, 2, 0, 76, 76, 108, 108, 2, 0, 73, 73, 105, 105,
		2, 0, 75, 75, 107, 107, 2, 0, 69, 69, 101, 101, 2, 0, 78, 78, 110, 110,
		2, 0, 79, 79, 111, 111, 2, 0, 84, 84, 116, 116, 2, 0, 9, 9, 32, 32, 2,
		0, 66, 66, 98, 98, 2, 0, 87, 87, 119, 119, 2, 0, 88, 88, 120, 120, 2, 0,
		83, 83, 115, 115, 2, 0, 82, 82, 114, 114, 2, 0, 71, 71, 103, 103, 2, 0,
		80, 80, 112, 112, 2, 0, 67, 67, 99, 99, 2, 0, 65, 65, 97, 97, 2, 0, 68,
		68, 100, 100, 2, 0, 72, 72, 104, 104, 2, 0, 89, 89, 121, 121, 2, 0, 85,
		85, 117, 117, 2, 0, 70, 70, 102, 102, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39,
		92, 92, 4, 0, 48, 57, 65, 90, 95, 95, 97, 122, 6, 0, 46, 46, 48, 57, 65,
		91, 93, 93, 95, 95, 97, 122, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57,
		7, 0, 9, 10, 13, 13, 32, 34, 39, 41, 60, 62, 91, 91, 93, 93, 295, 0, 1,
		1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9,
		1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0,
		17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0,
		0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0,
		0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0,
		0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1,
		0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55,
		1, 0, 0, 0, 0, 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0,
		63, 1, 0, 0, 0, 0, 65, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 1, 71, 1, 0, 0, 0,
		3, 73, 1, 0, 0, 0, 5, 75, 1, 0, 0, 0, 7, 77, 1, 0, 0, 0, 9, 79, 1, 0, 0,
		0, 11, 84, 1, 0, 0, 0, 13, 86, 1, 0, 0, 0, 15, 89, 1, 0, 0, 0, 17, 92,
		1, 0, 0, 0, 19, 94, 1, 0, 0, 0, 21, 97, 1, 0, 0, 0, 23, 99, 1, 0, 0, 0,
		25, 102, 1, 0, 0, 0, 27, 107, 1, 0, 0, 0, 29, 120, 1, 0, 0, 0, 31, 126,
		1, 0, 0, 0, 33, 140, 1, 0, 0, 0, 35, 148, 1, 0, 0, 0, 37, 156, 1, 0, 0,
		0, 39, 163, 1, 0, 0, 0, 41, 173, 1, 0, 0, 0, 43, 176, 1, 0, 0, 0, 45, 180,
		1, 0, 0, 0, 47, 184, 1, 0, 0, 0, 49, 187, 1, 0, 0, 0, 51, 191, 1, 0, 0,
		0, 53, 198, 1, 0, 0, 0, 55, 205, 1, 0, 0, 0, 57, 222, 1, 0, 0, 0, 59, 225,
		1, 0, 0, 0, 61, 257, 1, 0, 0, 0, 63, 259, 1, 0, 0, 0, 65, 267, 1, 0, 0,
		0, 67, 273, 1, 0, 0, 0, 69, 276, 1, 0, 0, 0, 71, 72, 5, 40, 0, 0, 72, 2,
		1, 0, 0, 0, 73, 74, 5, 41, 0, 0, 74, 4, 1, 0, 0, 0, 75, 76, 5, 91, 0, 0,
		76, 6, 1, 0, 0, 0, 77, 78, 5, 93, 0, 0, 78, 8, 1, 0, 0, 0, 79, 80, 5, 44,
		0, 0, 80, 10, 1, 0, 0, 0, 81, 85, 5, 61, 0, 0, 82, 83, 5, 61, 0, 0, 83,
		85, 5, 61, 0, 0, 84, 81, 1, 0, 0, 0, 84, 82, 1, 0, 0, 0, 85, 12, 1, 0,
		0, 0, 86, 87, 5, 33, 0, 0, 87, 88, 5, 61, 0, 0, 88, 14, 1, 0, 0, 0, 89,
		90, 5, 60, 0, 0, 90, 91, 5, 62, 0, 0, 91, 16, 1, 0, 0, 0, 92, 93, 5, 60,
		0, 0, 93, 18, 1, 0, 0, 0, 94, 95, 5, 60, 0, 0, 95, 96, 5, 61, 0, 0, 96,
		20, 1, 0, 0, 0, 97, 98, 5, 62, 0, 0, 98, 22, 1, 0, 0, 0, 99, 100, 5, 62,
		0, 0, 100, 101, 5, 61, 0, 0, 101, 24, 1, 0, 0, 0, 102, 103, 7, 0, 0, 0,
		103, 104, 7, 1, 0, 0, 104, 105, 7, 2, 0, 0, 105, 106, 7, 3, 0, 0, 106,
		26, 1, 0, 0, 0, 107, 108, 7, 4, 0, 0, 108, 109, 7, 5, 0, 0, 109, 111, 7,
		6, 0, 0, 110, 112, 7, 7, 0, 0, 111, 110, 1, 0, 0, 0, 112, 113, 1, 0, 0,
		0, 113, 111, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115,
		116, 7, 0, 0, 0, 116, 117, 7, 1, 0, 0, 117, 118, 7, 2, 0, 0, 118, 119,
		7, 3, 0, 0, 119, 28, 1, 0, 0, 0, 120, 121, 7, 1, 0, 0, 121, 122, 7, 0,
		0, 0, 122, 123, 7, 1, 0, 0, 123, 124, 7, 2, 0, 0, 124, 125, 7, 3, 0, 0,
		125, 30, 1, 0, 0, 0, 126, 127, 7, 4, 0, 0, 127, 128, 7, 5, 0, 0, 128, 130,
		7, 6, 0, 0, 129, 131, 7, 7, 0, 0, 130, 129, 1, 0, 0, 0, 131, 132, 1, 0,
		0, 0, 132, 130, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 134, 1, 0, 0, 0,
		134, 135, 7, 1, 0, 0, 135, 136, 7, 0, 0, 0, 136, 137, 7, 1, 0, 0, 137,
		138, 7, 2, 0, 0, 138, 139, 7, 3, 0, 0, 139, 32, 1, 0, 0, 0, 140, 141, 7,
		8, 0, 0, 141, 142, 7, 3, 0, 0, 142, 143, 7, 6, 0, 0, 143, 144, 7, 9, 0,
		0, 144, 145, 7, 3, 0, 0, 145, 146, 7, 3, 0, 0, 146, 147, 7, 4, 0, 0, 147,
		34, 1, 0, 0, 0, 148, 149, 7, 3, 0, 0, 149, 150, 7, 10, 0, 0, 150, 151,
		7, 1, 0, 0, 151, 152, 7, 11, 0, 0, 152, 154, 7, 6, 0, 0, 153, 155, 7, 11,
		0, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 36, 1, 0, 0, 0,
		156, 157, 7, 12, 0, 0, 157, 158, 7, 3, 0, 0, 158, 159, 7, 13, 0, 0, 159,
		160, 7, 3, 0, 0, 160, 161, 7, 10, 0, 0, 161, 162, 7, 14, 0, 0, 162, 38,
		1, 0, 0, 0, 163, 164, 7, 15, 0, 0, 164, 165, 7, 5, 0, 0, 165, 166, 7, 4,
		0, 0, 166, 167, 7, 6, 0, 0, 167, 168, 7, 16, 0, 0, 168, 169, 7, 1, 0, 0,
		169, 171, 7, 4, 0, 0, 170, 172, 7, 11, 0, 0, 171, 170, 1, 0, 0, 0, 171,
		172, 1, 0, 0, 0, 172, 40, 1, 0, 0, 0, 173, 174, 7, 1, 0, 0, 174, 175, 7,
		4, 0, 0, 175, 42, 1, 0, 0, 0, 176, 177, 7, 4, 0, 0, 177, 178, 7, 5, 0,
		0, 178, 179, 7, 6, 0, 0, 179, 44, 1, 0, 0, 0, 180, 181, 7, 16, 0, 0, 181,
		182, 7, 4, 0, 0, 182, 183, 7, 17, 0, 0, 183, 46, 1, 0, 0, 0, 184, 185,
		7, 5, 0, 0, 185, 186, 7, 12, 0, 0, 186, 48, 1, 0, 0, 0, 187, 188, 7, 18,
		0, 0, 188, 189, 7, 16, 0, 0, 189, 190, 7, 11, 0, 0, 190, 50, 1, 0, 0, 0,
		191, 192, 7, 18, 0, 0, 192, 193, 7, 16, 0, 0, 193, 194, 7, 11, 0, 0, 194,
		195, 7, 16, 0, 0, 195, 196, 7, 4, 0, 0, 196, 197, 7, 19, 0, 0, 197, 52,
		1, 0, 0, 0, 198, 199, 7, 18, 0, 0, 199, 200, 7, 16, 0, 0, 200, 201, 7,
		11, 0, 0, 201, 202, 7, 16, 0, 0, 202, 203, 7, 0, 0, 0, 203, 204, 7, 0,
		0, 0, 204, 54, 1, 0, 0, 0, 205, 206, 7, 18, 0, 0, 206, 207, 7, 16, 0, 0,
		207, 208, 7, 11, 0, 0, 208, 209, 7, 4, 0, 0, 209, 210, 7, 5, 0, 0, 210,
		211, 7, 4, 0, 0, 211, 212, 7, 3, 0, 0, 212, 56, 1, 0, 0, 0, 213, 214, 7,
		6, 0, 0, 214, 215, 7, 12, 0, 0, 215, 216, 7, 20, 0, 0, 216, 223, 7, 3,
		0, 0, 217, 218, 7, 21, 0, 0, 218, 219, 7, 16, 0, 0, 219, 220, 7, 0, 0,
		0, 220, 221, 7, 11, 0, 0, 221, 223, 7, 3, 0, 0, 222, 213, 1, 0, 0, 0, 222,
		217, 1, 0, 0, 0, 223, 58, 1, 0, 0, 0, 224, 226, 3, 67, 33, 0, 225, 224,
		1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 225, 1, 0, 0, 0, 227, 228, 1, 0,
		0, 0, 228, 235, 1, 0, 0, 0, 229, 231, 5, 46, 0, 0, 230, 232, 3, 67, 33,
		0, 231, 230, 1, 0, 0, 0, 232, 233, 1, 0, 0, 0, 233, 231, 1, 0, 0, 0, 233,
		234, 1, 0, 0, 0, 234, 236, 1, 0, 0, 0, 235, 229, 1, 0, 0, 0, 235, 236,
		1, 0, 0, 0, 236, 60, 1, 0, 0, 0, 237, 243, 5, 34, 0, 0, 238, 242, 8, 22,
		0, 0, 239, 240, 5, 92, 0, 0, 240, 242, 9, 0, 0, 0, 241, 238, 1, 0, 0, 0,
		241, 239, 1, 0, 0, 0, 242, 245, 1, 0, 0, 0, 243, 241, 1, 0, 0, 0, 243,
		244, 1, 0, 0, 0, 244, 246, 1, 0, 0, 0, 245, 243, 1, 0, 0, 0, 246, 258,
		5, 34, 0, 0, 247, 253, 5, 39, 0, 0, 248, 252, 8, 23, 0, 0, 249, 250, 5,
		92, 0, 0, 250, 252, 9, 0, 0, 0, 251, 248, 1, 0, 0, 0, 251, 249, 1, 0, 0,
		0, 252, 255, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254,
		256, 1, 0, 0, 0, 255, 253, 1, 0, 0, 0, 256, 258, 5, 39, 0, 0, 257, 237,
		1, 0, 0, 0, 257, 247, 1, 0, 0, 0, 258, 62, 1, 0, 0, 0, 259, 263, 7, 24,
		0, 0, 260, 262, 7, 25, 0, 0, 261, 260, 1, 0, 0, 0, 262, 265, 1, 0, 0, 0,
		263, 261, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 64, 1, 0, 0, 0, 265, 263,
		1, 0, 0, 0, 266, 268, 7, 26, 0, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0,
		0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 271, 1, 0, 0, 0,
		271, 272, 6, 32, 0, 0, 272, 66, 1, 0, 0, 0, 273, 274, 7, 27, 0, 0, 274,
		68, 1, 0, 0, 0, 275, 277, 8, 28, 0, 0, 276, 275, 1, 0, 0, 0, 277, 278,
		1, 0, 0, 0, 278, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 70, 1, 0,
		0, 0, 18, 0, 84, 113, 132, 154, 171, 222, 227, 233, 235, 241, 243, 251,
		253, 257, 263, 269, 278, 1, 6, 0, 0,
	}
	deserializer := antlr.NewATNDeserializer(nil)
	staticData.atn = deserializer.Deserialize(staticData.serializedATN)
	atn := staticData.atn
	staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
	decisionToDFA := staticData.decisionToDFA
	for index, state := range atn.DecisionToState {
		decisionToDFA[index] = antlr.NewDFA(state, index)
	}
}

// FilterQueryLexerInit initializes any static state used to implement FilterQueryLexer. By default the
// static state used to implement the lexer is lazily initialized during the first call to
// NewFilterQueryLexer(). You can call this function if you wish to initialize the static state ahead
// of time.
func FilterQueryLexerInit() {
	staticData := &FilterQueryLexerLexerStaticData
	staticData.once.Do(filterquerylexerLexerInit)
}

// NewFilterQueryLexer produces a new lexer instance for the optional input antlr.CharStream.
func NewFilterQueryLexer(input antlr.CharStream) *FilterQueryLexer {
	FilterQueryLexerInit()
	l := new(FilterQueryLexer)
	l.BaseLexer = antlr.NewBaseLexer(input)
	staticData := &FilterQueryLexerLexerStaticData
	l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache)
	l.channelNames = staticData.ChannelNames
	l.modeNames = staticData.ModeNames
	l.RuleNames = staticData.RuleNames
	l.LiteralNames = staticData.LiteralNames
	l.SymbolicNames = staticData.SymbolicNames
	l.GrammarFileName = "FilterQuery.g4"
	// TODO: l.EOF = antlr.TokenEOF

	return l
}

// FilterQueryLexer tokens.
const (
	FilterQueryLexerLPAREN      = 1
	FilterQueryLexerRPAREN      = 2
	FilterQueryLexerLBRACK      = 3
	FilterQueryLexerRBRACK      = 4
	FilterQueryLexerCOMMA       = 5
	FilterQueryLexerEQUALS      = 6
	FilterQueryLexerNOT_EQUALS  = 7
	FilterQueryLexerNEQ         = 8
	FilterQueryLexerLT          = 9
	FilterQueryLexerLE          = 10
	FilterQueryLexerGT          = 11
	FilterQueryLexerGE          = 12
	FilterQueryLexerLIKE        = 13
	FilterQueryLexerNOT_LIKE    = 14
	FilterQueryLexerILIKE       = 15
	FilterQueryLexerNOT_ILIKE   = 16
	FilterQueryLexerBETWEEN     = 17
	FilterQueryLexerEXISTS      = 18
	FilterQueryLexerREGEXP      = 19
	FilterQueryLexerCONTAINS    = 20
	FilterQueryLexerIN          = 21
	FilterQueryLexerNOT         = 22
	FilterQueryLexerAND         = 23
	FilterQueryLexerOR          = 24
	FilterQueryLexerHAS         = 25
	FilterQueryLexerHASANY      = 26
	FilterQueryLexerHASALL      = 27
	FilterQueryLexerHASNONE     = 28
	FilterQueryLexerBOOL        = 29
	FilterQueryLexerNUMBER      = 30
	FilterQueryLexerQUOTED_TEXT = 31
	FilterQueryLexerKEY         = 32
	FilterQueryLexerWS          = 33
	FilterQueryLexerFREETEXT    = 34
)
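A minimal tokenization sketch using only the generated lexer above, with the same assumed import path as the earlier sketches. WS appears to be skipped by a lexer action in the serialized ATN, so only meaningful tokens should surface.

package main

import (
	"fmt"

	"github.com/antlr4-go/antlr/v4"

	parser "github.com/SigNoz/signoz/pkg/parser/grammar" // assumed import path
)

func main() {
	input := antlr.NewInputStream(`status IN ('ok', 'error')`)
	lexer := parser.NewFilterQueryLexer(input)
	// Drain the token stream until EOF, printing each token's numeric type
	// (the FilterQueryLexer* constants above) and its matched text.
	for tok := lexer.NextToken(); tok.GetTokenType() != antlr.TokenEOF; tok = lexer.NextToken() {
		fmt.Printf("type=%d text=%q\n", tok.GetTokenType(), tok.GetText())
	}
}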
112
pkg/parser/grammar/filterquery_listener.go
Normal file
@@ -0,0 +1,112 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

// FilterQueryListener is a complete listener for a parse tree produced by FilterQueryParser.
type FilterQueryListener interface {
	antlr.ParseTreeListener

	// EnterQuery is called when entering the query production.
	EnterQuery(c *QueryContext)

	// EnterExpression is called when entering the expression production.
	EnterExpression(c *ExpressionContext)

	// EnterOrExpression is called when entering the orExpression production.
	EnterOrExpression(c *OrExpressionContext)

	// EnterAndExpression is called when entering the andExpression production.
	EnterAndExpression(c *AndExpressionContext)

	// EnterUnaryExpression is called when entering the unaryExpression production.
	EnterUnaryExpression(c *UnaryExpressionContext)

	// EnterPrimary is called when entering the primary production.
	EnterPrimary(c *PrimaryContext)

	// EnterComparison is called when entering the comparison production.
	EnterComparison(c *ComparisonContext)

	// EnterInClause is called when entering the inClause production.
	EnterInClause(c *InClauseContext)

	// EnterNotInClause is called when entering the notInClause production.
	EnterNotInClause(c *NotInClauseContext)

	// EnterValueList is called when entering the valueList production.
	EnterValueList(c *ValueListContext)

	// EnterFullText is called when entering the fullText production.
	EnterFullText(c *FullTextContext)

	// EnterFunctionCall is called when entering the functionCall production.
	EnterFunctionCall(c *FunctionCallContext)

	// EnterFunctionParamList is called when entering the functionParamList production.
	EnterFunctionParamList(c *FunctionParamListContext)

	// EnterFunctionParam is called when entering the functionParam production.
	EnterFunctionParam(c *FunctionParamContext)

	// EnterArray is called when entering the array production.
	EnterArray(c *ArrayContext)

	// EnterValue is called when entering the value production.
	EnterValue(c *ValueContext)

	// EnterKey is called when entering the key production.
	EnterKey(c *KeyContext)

	// ExitQuery is called when exiting the query production.
	ExitQuery(c *QueryContext)

	// ExitExpression is called when exiting the expression production.
	ExitExpression(c *ExpressionContext)

	// ExitOrExpression is called when exiting the orExpression production.
	ExitOrExpression(c *OrExpressionContext)

	// ExitAndExpression is called when exiting the andExpression production.
	ExitAndExpression(c *AndExpressionContext)

	// ExitUnaryExpression is called when exiting the unaryExpression production.
	ExitUnaryExpression(c *UnaryExpressionContext)

	// ExitPrimary is called when exiting the primary production.
	ExitPrimary(c *PrimaryContext)

	// ExitComparison is called when exiting the comparison production.
	ExitComparison(c *ComparisonContext)

	// ExitInClause is called when exiting the inClause production.
	ExitInClause(c *InClauseContext)

	// ExitNotInClause is called when exiting the notInClause production.
	ExitNotInClause(c *NotInClauseContext)

	// ExitValueList is called when exiting the valueList production.
	ExitValueList(c *ValueListContext)

	// ExitFullText is called when exiting the fullText production.
	ExitFullText(c *FullTextContext)

	// ExitFunctionCall is called when exiting the functionCall production.
	ExitFunctionCall(c *FunctionCallContext)

	// ExitFunctionParamList is called when exiting the functionParamList production.
	ExitFunctionParamList(c *FunctionParamListContext)

	// ExitFunctionParam is called when exiting the functionParam production.
	ExitFunctionParam(c *FunctionParamContext)

	// ExitArray is called when exiting the array production.
	ExitArray(c *ArrayContext)

	// ExitValue is called when exiting the value production.
	ExitValue(c *ValueContext)

	// ExitKey is called when exiting the key production.
	ExitKey(c *KeyContext)
}
3539
pkg/parser/grammar/filterquery_parser.go
Normal file
File diff suppressed because it is too large
61
pkg/parser/grammar/filterquery_visitor.go
Normal file
@@ -0,0 +1,61 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

// A complete Visitor for a parse tree produced by FilterQueryParser.
type FilterQueryVisitor interface {
	antlr.ParseTreeVisitor

	// Visit a parse tree produced by FilterQueryParser#query.
	VisitQuery(ctx *QueryContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#expression.
	VisitExpression(ctx *ExpressionContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#orExpression.
	VisitOrExpression(ctx *OrExpressionContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#andExpression.
	VisitAndExpression(ctx *AndExpressionContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#unaryExpression.
	VisitUnaryExpression(ctx *UnaryExpressionContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#primary.
	VisitPrimary(ctx *PrimaryContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#comparison.
	VisitComparison(ctx *ComparisonContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#inClause.
	VisitInClause(ctx *InClauseContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#notInClause.
	VisitNotInClause(ctx *NotInClauseContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#valueList.
	VisitValueList(ctx *ValueListContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#fullText.
	VisitFullText(ctx *FullTextContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#functionCall.
	VisitFunctionCall(ctx *FunctionCallContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#functionParamList.
	VisitFunctionParamList(ctx *FunctionParamListContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#functionParam.
	VisitFunctionParam(ctx *FunctionParamContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#array.
	VisitArray(ctx *ArrayContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#value.
	VisitValue(ctx *ValueContext) interface{}

	// Visit a parse tree produced by FilterQueryParser#key.
	VisitKey(ctx *KeyContext) interface{}
}
22
pkg/query-service/Dockerfile.multi-arch
Normal file
@@ -0,0 +1,22 @@
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"

FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG ARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*

COPY ./target/${OS}-${ARCH}/signoz-community /root/signoz-community
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz-community

ENTRYPOINT ["./signoz-community"]
CMD ["-config", "/root/config/prometheus.yml"]
@@ -21,6 +21,7 @@ import (
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/mailru/easyjson"
	"github.com/uptrace/bun"

	"github.com/google/uuid"
@@ -33,7 +34,6 @@ import (
	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/featuretypes"

	"go.uber.org/zap"

@@ -41,6 +41,7 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/app/logs"
	"github.com/SigNoz/signoz/pkg/query-service/app/resource"
	"github.com/SigNoz/signoz/pkg/query-service/app/services"
	"github.com/SigNoz/signoz/pkg/query-service/app/traces/smart"
	"github.com/SigNoz/signoz/pkg/query-service/app/traces/tracedetail"
	"github.com/SigNoz/signoz/pkg/query-service/common"
	"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -144,6 +145,9 @@ type ClickHouseReader struct {
	liveTailRefreshSeconds int
	cluster                string

	useLogsNewSchema  bool
	useTraceNewSchema bool

	logsTableName      string
	logsLocalTableName string

@@ -164,6 +168,8 @@ func NewReader(
	telemetryStore telemetrystore.TelemetryStore,
	prometheus prometheus.Prometheus,
	cluster string,
	useLogsNewSchema bool,
	useTraceNewSchema bool,
	fluxIntervalForTraceDetail time.Duration,
	cache cache.Cache,
) *ClickHouseReader {
@@ -177,13 +183,13 @@ func NewReaderFromClickhouseConnection(
	telemetryStore telemetrystore.TelemetryStore,
	prometheus prometheus.Prometheus,
	cluster string,
	useLogsNewSchema bool,
	useTraceNewSchema bool,
	fluxIntervalForTraceDetail time.Duration,
	cache cache.Cache,
) *ClickHouseReader {
	logsTableName := options.primary.LogsTable
	logsLocalTableName := options.primary.LogsLocalTable

	useLogsNewSchema, _ := featureControl.Boolean(context.Background(), valuer.UUID{}, featuretypes.UseLogsNewSchema)
	if useLogsNewSchema {
		logsTableName = options.primary.LogsTableV2
		logsLocalTableName = options.primary.LogsLocalTableV2
@@ -191,7 +197,6 @@ func NewReaderFromClickhouseConnection(

	traceTableName := options.primary.IndexTable
	traceLocalTableName := options.primary.LocalIndexTable
	useTraceNewSchema, _ := featureControl.Boolean(context.Background(), valuer.UUID{}, featuretypes.UseTracesNewSchema)
	if useTraceNewSchema {
		traceTableName = options.primary.TraceIndexTableV3
		traceLocalTableName = options.primary.TraceLocalTableNameV3
@@ -287,7 +292,7 @@ func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, erro
	services := []string{}
	query := fmt.Sprintf(`SELECT DISTINCT serviceName FROM %s.%s WHERE toDate(timestamp) > now() - INTERVAL 1 DAY`, r.TraceDB, r.traceTableName)

	if ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseTracesNewSchema); ok {
	if r.useTraceNewSchema {
		query = fmt.Sprintf(`SELECT DISTINCT serviceName FROM %s.%s WHERE ts_bucket_start > (toUnixTimestamp(now() - INTERVAL 1 DAY) - 1800) AND toDate(timestamp) > now() - INTERVAL 1 DAY`, r.TraceDB, r.traceTableName)
	}

@@ -401,7 +406,9 @@ func (r *ClickHouseReader) buildResourceSubQuery(tags []model.TagQueryParam, svc
		&filterSet,
		[]v3.AttributeKey{},
		v3.AttributeKey{},
		false)
		false,
		false,
	)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return "", err
@@ -535,7 +542,7 @@ func (r *ClickHouseReader) GetServicesV2(ctx context.Context, queryParams *model
}

func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.GetServicesParams, skipConfig *model.SkipConfig) (*[]model.ServiceItem, *model.ApiError) {
	if ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseTracesNewSchema); ok {
	if r.useTraceNewSchema {
		return r.GetServicesV2(ctx, queryParams, skipConfig)
	}

@@ -889,7 +896,7 @@ func (r *ClickHouseReader) GetTopOperationsV2(ctx context.Context, queryParams *

func (r *ClickHouseReader) GetTopOperations(ctx context.Context, queryParams *model.GetTopOperationsParams) (*[]model.TopOperationsItem, *model.ApiError) {

	if ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseTracesNewSchema); ok {
	if r.useTraceNewSchema {
		return r.GetTopOperationsV2(ctx, queryParams)
	}

@@ -2677,7 +2684,7 @@ func (r *ClickHouseReader) GetSpansInLastHeartBeatInterval(ctx context.Context,
	var spansInLastHeartBeatInterval uint64

	queryStr := fmt.Sprintf("SELECT count() from %s.%s where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d));", signozTraceDBName, signozSpansTable, int(interval.Minutes()))
	if ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseTracesNewSchema); ok {
	if r.useTraceNewSchema {
		queryStr = fmt.Sprintf("SELECT count() from %s.%s where ts_bucket_start >= toUInt64(toUnixTimestamp(now() - toIntervalMinute(%d))) - 1800 and timestamp > toUnixTimestamp(now()-toIntervalMinute(%d));", signozTraceDBName, r.traceTableName, int(interval.Minutes()), int(interval.Minutes()))
	}
	r.db.QueryRow(ctx, queryStr).Scan(&spansInLastHeartBeatInterval)
@@ -2817,7 +2824,7 @@ func (r *ClickHouseReader) GetTagsInfoInLastHeartBeatInterval(ctx context.Contex
		where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d))
		group by serviceName, env, language;`, r.TraceDB, r.traceTableName, int(interval.Minutes()))

	if ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseTracesNewSchema); ok {
	if r.useTraceNewSchema {
		queryStr = fmt.Sprintf(`select serviceName, resources_string['deployment.environment'] as env,
		resources_string['telemetry.sdk.language'] as language from %s.%s
		where timestamp > toUnixTimestamp(now()-toIntervalMinute(%d))
@@ -2922,9 +2929,8 @@ func (r *ClickHouseReader) extractSelectedAndInterestingFields(tableStatement st
		if overrideFieldType != "" {
			field.Type = overrideFieldType
		}
		ok, _ := r.featureControl.Boolean(context.Background(), valuer.UUID{}, featuretypes.UseLogsNewSchema)
		// all static fields are assumed to be selected as we don't allow changing them
		if isColumn(ok, tableStatement, field.Type, field.Name, field.DataType) {
		if isColumn(r.useLogsNewSchema, tableStatement, field.Type, field.Name, field.DataType) {
			response.Selected = append(response.Selected, field)
		} else {
			response.Interesting = append(response.Interesting, field)
@@ -3005,8 +3011,7 @@ func (r *ClickHouseReader) UpdateLogField(ctx context.Context, field *model.Upda
		return &model.ApiError{Err: err, Typ: model.ErrorBadData}
	}

	ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseLogsNewSchema)
	if ok {
	if r.useLogsNewSchema {
		return r.UpdateLogFieldV2(ctx, field)
	}

@@ -3753,7 +3758,7 @@ func (r *ClickHouseReader) GetLatestReceivedMetric(

	quotedMetricNames := []string{}
	for _, m := range metricNames {
		quotedMetricNames = append(quotedMetricNames, utils.ClickHouseFormattedValue(m))
		quotedMetricNames = append(quotedMetricNames, utils.ClickHouseFormattedValue(m, false))
	}
	commaSeparatedMetricNames := strings.Join(quotedMetricNames, ", ")

@@ -3895,7 +3900,6 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v
	var tagKey string
	var dataType string
	var attType string
	ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseLogsNewSchema)
	for rows.Next() {
		if err := rows.Scan(&tagKey, &attType, &dataType); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
@@ -3904,7 +3908,7 @@ func (r *ClickHouseReader) GetLogAggregateAttributes(ctx context.Context, req *v
			Key:      tagKey,
			DataType: v3.AttributeKeyDataType(dataType),
			Type:     v3.AttributeKeyType(attType),
			IsColumn: isColumn(ok, statements[0].Statement, attType, tagKey, dataType),
			IsColumn: isColumn(r.useLogsNewSchema, statements[0].Statement, attType, tagKey, dataType),
		}
		response.AttributeKeys = append(response.AttributeKeys, key)
	}
@@ -3950,7 +3954,6 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt
	var attributeKey string
	var attributeDataType string
	var tagType string
	ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseLogsNewSchema)
	for rows.Next() {
		if err := rows.Scan(&attributeKey, &tagType, &attributeDataType); err != nil {
			return nil, fmt.Errorf("error while scanning rows: %s", err.Error())
@@ -3960,7 +3963,7 @@ func (r *ClickHouseReader) GetLogAttributeKeys(ctx context.Context, req *v3.Filt
			Key:      attributeKey,
			DataType: v3.AttributeKeyDataType(attributeDataType),
			Type:     v3.AttributeKeyType(tagType),
			IsColumn: isColumn(ok, statements[0].Statement, tagType, attributeKey, attributeDataType),
			IsColumn: isColumn(r.useLogsNewSchema, statements[0].Statement, tagType, attributeKey, attributeDataType),
		}

		response.AttributeKeys = append(response.AttributeKeys, key)
@@ -4014,16 +4017,16 @@ func (r *ClickHouseReader) FetchRelatedValues(ctx context.Context, req *v3.Filte
		}
		switch v := item.Value.(type) {
		case string:
			fmtVal := utils.ClickHouseFormattedValue(v)
			fmtVal := utils.ClickHouseFormattedValue(v, false)
			addCondition(fmtVal)
		case []string:
			for _, val := range v {
				fmtVal := utils.ClickHouseFormattedValue(val)
				fmtVal := utils.ClickHouseFormattedValue(val, false)
				addCondition(fmtVal)
			}
		case []interface{}:
			for _, val := range v {
				fmtVal := utils.ClickHouseFormattedValue(val)
				fmtVal := utils.ClickHouseFormattedValue(val, false)
				addCondition(fmtVal)
			}
		}
@@ -4691,8 +4694,7 @@ func (r *ClickHouseReader) GetTraceAggregateAttributes(ctx context.Context, req
	}

	fields := constants.NewStaticFieldsTraces
	ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseTracesNewSchema)
	if !ok {
	if !r.useTraceNewSchema {
		fields = constants.DeprecatedStaticFieldsTraces
	}

@@ -4756,8 +4758,7 @@ func (r *ClickHouseReader) GetTraceAttributeKeys(ctx context.Context, req *v3.Fi

	// remove this later just to have NewStaticFieldsTraces in the response
	fields := constants.NewStaticFieldsTraces
	ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseTracesNewSchema)
	if !ok {
	if !r.useTraceNewSchema {
		fields = constants.DeprecatedStaticFieldsTraces
	}

@@ -4821,7 +4822,7 @@ func (r *ClickHouseReader) GetTraceAttributeValues(ctx context.Context, req *v3.

	// TODO(nitya): remove 24 hour limit in future after checking the perf/resource implications
	where := "timestamp >= toDateTime64(now() - INTERVAL 48 HOUR, 9)"
	if ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseTracesNewSchema); ok {
	if r.useTraceNewSchema {
		where += " AND ts_bucket_start >= toUInt64(toUnixTimestamp(now() - INTERVAL 48 HOUR))"
	}
	query = fmt.Sprintf("SELECT DISTINCT %s FROM %s.%s WHERE %s AND %s ILIKE $1 LIMIT $2", selectKey, r.TraceDB, r.traceTableName, where, filterValueColumnWhere)
@@ -4919,8 +4920,7 @@ func (r *ClickHouseReader) GetSpanAttributeKeysV2(ctx context.Context) (map[stri
	}

func (r *ClickHouseReader) GetSpanAttributeKeys(ctx context.Context) (map[string]v3.AttributeKey, error) {
	ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseTracesNewSchema)
	if ok {
	if r.useTraceNewSchema {
		return r.GetSpanAttributeKeysV2(ctx)
	}
	var query string
@@ -5123,7 +5123,7 @@ func (r *ClickHouseReader) ReadRuleStateHistoryByRuleID(
	if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains {
		toFormat = fmt.Sprintf("%%%s%%", toFormat)
	}
	fmtVal := utils.ClickHouseFormattedValue(toFormat)
	fmtVal := utils.ClickHouseFormattedValue(toFormat, false)
	switch op {
	case v3.FilterOperatorEqual:
		conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') = %s", item.Key.Key, fmtVal))
@@ -6795,3 +6795,262 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, metric

	return cachedMetadata, nil
}

func (r *ClickHouseReader) SearchTracesV2(ctx context.Context, params *model.SearchTracesParams) (*[]model.SearchSpansResult, error) {
	searchSpansResult := []model.SearchSpansResult{
		{
			Columns:   []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError", "StatusMessage", "StatusCodeString", "SpanKind"},
			IsSubTree: false,
			Events:    make([][]interface{}, 0),
		},
	}

	var traceSummary model.TraceSummary
	summaryQuery := fmt.Sprintf("SELECT * from %s.%s WHERE trace_id=$1", r.TraceDB, r.traceSummaryTable)
	err := r.db.QueryRow(ctx, summaryQuery, params.TraceID).Scan(&traceSummary.TraceID, &traceSummary.Start, &traceSummary.End, &traceSummary.NumSpans)
	if err != nil {
		if err == sql.ErrNoRows {
			return &searchSpansResult, nil
		}
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query")
	}

	if traceSummary.NumSpans > uint64(params.MaxSpansInTrace) {
		zap.L().Error("Max spans allowed in a trace limit reached", zap.Int("MaxSpansInTrace", params.MaxSpansInTrace),
			zap.Uint64("Count", traceSummary.NumSpans))
		claims, ok := authtypes.ClaimsFromContext(ctx)
		if ok {
			data := map[string]interface{}{
				"traceSize":            traceSummary.NumSpans,
				"maxSpansInTraceLimit": params.MaxSpansInTrace,
				"algo":                 "smart",
			}
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_MAX_SPANS_ALLOWED_LIMIT_REACHED, data, claims.Email, true, false)
		}
		return nil, fmt.Errorf("max spans allowed in trace limit reached, please contact support for more details")
	}

	claims, ok := authtypes.ClaimsFromContext(ctx)
	if ok {
		data := map[string]interface{}{
			"traceSize": traceSummary.NumSpans,
			"algo":      "smart",
		}
		telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_TRACE_DETAIL_API, data, claims.Email, true, false)
	}

	var startTime, endTime, durationNano uint64
	var searchScanResponses []model.SpanItemV2

	query := fmt.Sprintf("SELECT timestamp, duration_nano, span_id, trace_id, has_error, kind, resource_string_service$$name, name, references, attributes_string, attributes_number, attributes_bool, resources_string, events, status_message, status_code_string, kind_string FROM %s.%s WHERE trace_id=$1 and ts_bucket_start>=$2 and ts_bucket_start<=$3", r.TraceDB, r.traceTableName)

	start := time.Now()

	err = r.db.Select(ctx, &searchScanResponses, query, params.TraceID, strconv.FormatInt(traceSummary.Start.Unix()-1800, 10), strconv.FormatInt(traceSummary.End.Unix(), 10))

	zap.L().Info(query)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query")
	}
	end := time.Now()
	zap.L().Debug("getTraceSQLQuery took: ", zap.Duration("duration", end.Sub(start)))

	searchSpansResult[0].Events = make([][]interface{}, len(searchScanResponses))

	searchSpanResponses := []model.SearchSpanResponseItem{}
	start = time.Now()
	for _, item := range searchScanResponses {
		ref := []model.OtelSpanRef{}
		err := json.Unmarshal([]byte(item.References), &ref)
		if err != nil {
			zap.L().Error("Error unmarshalling references", zap.Error(err))
			return nil, err
		}

		// merge attributes_number and attributes_bool to attributes_string
		for k, v := range item.Attributes_bool {
			item.Attributes_string[k] = fmt.Sprintf("%v", v)
		}
		for k, v := range item.Attributes_number {
			item.Attributes_string[k] = fmt.Sprintf("%v", v)
		}
		for k, v := range item.Resources_string {
			item.Attributes_string[k] = v
		}

		jsonItem := model.SearchSpanResponseItem{
			SpanID:           item.SpanID,
			TraceID:          item.TraceID,
			ServiceName:      item.ServiceName,
			Name:             item.Name,
			Kind:             int32(item.Kind),
			DurationNano:     int64(item.DurationNano),
			HasError:         item.HasError,
			StatusMessage:    item.StatusMessage,
			StatusCodeString: item.StatusCodeString,
			SpanKind:         item.SpanKind,
			References:       ref,
			Events:           item.Events,
			TagMap:           item.Attributes_string,
		}

		jsonItem.TimeUnixNano = uint64(item.TimeUnixNano.UnixNano() / 1000000)

		searchSpanResponses = append(searchSpanResponses, jsonItem)
		if startTime == 0 || jsonItem.TimeUnixNano < startTime {
			startTime = jsonItem.TimeUnixNano
		}
		if endTime == 0 || jsonItem.TimeUnixNano > endTime {
			endTime = jsonItem.TimeUnixNano
		}
		if durationNano == 0 || uint64(jsonItem.DurationNano) > durationNano {
			durationNano = uint64(jsonItem.DurationNano)
		}
	}
	end = time.Now()
	zap.L().Debug("getTraceSQLQuery unmarshal took: ", zap.Duration("duration", end.Sub(start)))

	if len(searchScanResponses) > params.SpansRenderLimit {
		start = time.Now()
		searchSpansResult, err = smart.SmartTraceAlgorithm(searchSpanResponses, params.SpanID, params.LevelUp, params.LevelDown, params.SpansRenderLimit)
		if err != nil {
			return nil, err
		}
		end = time.Now()
		zap.L().Debug("smartTraceAlgo took: ", zap.Duration("duration", end.Sub(start)))
		claims, ok := authtypes.ClaimsFromContext(ctx)
		if ok {
			data := map[string]interface{}{
				"traceSize":        len(searchScanResponses),
				"spansRenderLimit": params.SpansRenderLimit,
				"algo":             "smart",
			}
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LARGE_TRACE_OPENED, data, claims.Email, true, false)
		}
	} else {
		for i, item := range searchSpanResponses {
			spanEvents := item.GetValues()
			searchSpansResult[0].Events[i] = spanEvents
		}
	}

	searchSpansResult[0].StartTimestampMillis = startTime - (durationNano / 1000000)
	searchSpansResult[0].EndTimestampMillis = endTime + (durationNano / 1000000)

	return &searchSpansResult, nil
}
func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.SearchTracesParams) (*[]model.SearchSpansResult, error) {

	if r.useTraceNewSchema {
		return r.SearchTracesV2(ctx, params)
	}

	var countSpans uint64
	countQuery := fmt.Sprintf("SELECT count() as count from %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
	err := r.db.QueryRow(ctx, countQuery, params.TraceID).Scan(&countSpans)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query")
	}

	if countSpans > uint64(params.MaxSpansInTrace) {
		zap.L().Error("Max spans allowed in a trace limit reached", zap.Int("MaxSpansInTrace", params.MaxSpansInTrace),
			zap.Uint64("Count", countSpans))
		claims, ok := authtypes.ClaimsFromContext(ctx)
		if ok {
			data := map[string]interface{}{
				"traceSize":            countSpans,
				"maxSpansInTraceLimit": params.MaxSpansInTrace,
				"algo":                 "smart",
			}
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_MAX_SPANS_ALLOWED_LIMIT_REACHED, data, claims.Email, true, false)
		}
		return nil, fmt.Errorf("max spans allowed in trace limit reached, please contact support for more details")
	}

	claims, ok := authtypes.ClaimsFromContext(ctx)
	if ok {
		data := map[string]interface{}{
			"traceSize": countSpans,
			"algo":      "smart",
		}
		telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_TRACE_DETAIL_API, data, claims.Email, true, false)
	}

	var startTime, endTime, durationNano uint64
	var searchScanResponses []model.SearchSpanDBResponseItem

	query := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)

	start := time.Now()

	err = r.db.Select(ctx, &searchScanResponses, query, params.TraceID)

	zap.L().Info(query)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query")
	}
	end := time.Now()
	zap.L().Debug("getTraceSQLQuery took: ", zap.Duration("duration", end.Sub(start)))
	searchSpansResult := []model.SearchSpansResult{{
		Columns:   []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError", "StatusMessage", "StatusCodeString", "SpanKind"},
		Events:    make([][]interface{}, len(searchScanResponses)),
		IsSubTree: false,
	},
	}

	searchSpanResponses := []model.SearchSpanResponseItem{}
	start = time.Now()
	for _, item := range searchScanResponses {
		var jsonItem model.SearchSpanResponseItem
		easyjson.Unmarshal([]byte(item.Model), &jsonItem)
		jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano() / 1000000)
		searchSpanResponses = append(searchSpanResponses, jsonItem)
		if startTime == 0 || jsonItem.TimeUnixNano < startTime {
			startTime = jsonItem.TimeUnixNano
		}
		if endTime == 0 || jsonItem.TimeUnixNano > endTime {
			endTime = jsonItem.TimeUnixNano
		}
		if durationNano == 0 || uint64(jsonItem.DurationNano) > durationNano {
			durationNano = uint64(jsonItem.DurationNano)
		}
	}
	end = time.Now()
	zap.L().Debug("getTraceSQLQuery unmarshal took: ", zap.Duration("duration", end.Sub(start)))

	if len(searchScanResponses) > params.SpansRenderLimit {
		start = time.Now()
		searchSpansResult, err = smart.SmartTraceAlgorithm(searchSpanResponses, params.SpanID, params.LevelUp, params.LevelDown, params.SpansRenderLimit)
		if err != nil {
			return nil, err
		}
		end = time.Now()
		zap.L().Debug("smartTraceAlgo took: ", zap.Duration("duration", end.Sub(start)))
		claims, ok := authtypes.ClaimsFromContext(ctx)
		if ok {
			data := map[string]interface{}{
				"traceSize":        len(searchScanResponses),
				"spansRenderLimit": params.SpansRenderLimit,
				"algo":             "smart",
			}
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LARGE_TRACE_OPENED, data, claims.Email, true, false)
		}
	} else {
		for i, item := range searchSpanResponses {
			spanEvents := item.GetValues()
			searchSpansResult[0].Events[i] = spanEvents
		}
	}

	searchSpansResult[0].StartTimestampMillis = startTime - (durationNano / 1000000)
	searchSpansResult[0].EndTimestampMillis = endTime + (durationNano / 1000000)

	return &searchSpansResult, nil
}

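The recurring change in the hunks above swaps per-call featureControl.Boolean lookups for useLogsNewSchema/useTraceNewSchema booleans resolved once when the reader is constructed. A minimal sketch of that pattern, with every name other than useTraceNewSchema hypothetical:

// Sketch of the constructor-cached flag pattern; reader, newReader, and the
// table-name strings are illustrative, not taken from the diff.
type reader struct {
	useTraceNewSchema bool // resolved once, at construction
}

func newReader(useTraceNewSchema bool) *reader {
	return &reader{useTraceNewSchema: useTraceNewSchema}
}

func (r *reader) traceTable() string {
	// Previously the diff's left side did a feature lookup on every call:
	//   ok, _ := r.featureControl.Boolean(ctx, valuer.UUID{}, featuretypes.UseTracesNewSchema)
	// Now the decision is a plain field read, with no ctx or error path.
	if r.useTraceNewSchema {
		return "trace_index_v3" // hypothetical table name
	}
	return "trace_index_v2" // hypothetical table name
}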
@@ -549,6 +549,7 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) {
 	router.HandleFunc("/api/v1/services/list", am.ViewAccess(aH.getServicesList)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/service/top_operations", am.ViewAccess(aH.getTopOperations)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/service/top_level_operations", am.ViewAccess(aH.getServicesTopLevelOps)).Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(aH.SearchTraces)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/usage", am.ViewAccess(aH.getUsage)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/dependency_graph", am.ViewAccess(aH.dependencyGraph)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/settings/ttl", am.AdminAccess(aH.setTTL)).Methods(http.MethodPost)

@@ -1724,6 +1725,22 @@ func (aH *APIHandler) getServicesList(w http.ResponseWriter, r *http.Request) {

 }

+func (aH *APIHandler) SearchTraces(w http.ResponseWriter, r *http.Request) {
+	params, err := ParseSearchTracesParams(r)
+	if err != nil {
+		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
+		return
+	}
+
+	result, err := aH.reader.SearchTraces(r.Context(), params)
+	if aH.HandleError(w, err, http.StatusBadRequest) {
+		return
+	}
+
+	aH.WriteJSON(w, r, result)
+
+}
+
 func (aH *APIHandler) GetWaterfallSpansForTraceWithMetadata(w http.ResponseWriter, r *http.Request) {
 	traceID := mux.Vars(r)["traceId"]
 	if traceID == "" {
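For reference, the re-registered endpoint can be exercised with a plain GET. This is a hedged sketch: the host, port, and trace ID are placeholders, and a real deployment will also expect the access-token header that ViewAccess enforces.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder instance and trace ID; adjust for your deployment.
	resp, err := http.Get("http://localhost:8080/api/v1/traces/0123456789abcdef")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}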
@@ -28,15 +28,15 @@ func generateOverviewSQL(start, end int64, item []v3.FilterItem) string {
 	for _, filter := range item {
 		switch filter.Key.Key {
 		case "service.name":
-			whereClauses = append(whereClauses, fmt.Sprintf("%s IN (%s)", "service_name", format.ClickHouseFormattedValue(filter.Value)))
+			whereClauses = append(whereClauses, fmt.Sprintf("%s IN (%s)", "service_name", format.ClickHouseFormattedValue(filter.Value, false)))
 		case "name":
-			whereClauses = append(whereClauses, fmt.Sprintf("%s IN (%s)", "span_name", format.ClickHouseFormattedValue(filter.Value)))
+			whereClauses = append(whereClauses, fmt.Sprintf("%s IN (%s)", "span_name", format.ClickHouseFormattedValue(filter.Value, false)))
 		case "destination":
-			whereClauses = append(whereClauses, fmt.Sprintf("%s IN (%s)", "destination", format.ClickHouseFormattedValue(filter.Value)))
+			whereClauses = append(whereClauses, fmt.Sprintf("%s IN (%s)", "destination", format.ClickHouseFormattedValue(filter.Value, false)))
 		case "queue":
-			whereClauses = append(whereClauses, fmt.Sprintf("%s IN (%s)", "messaging_system", format.ClickHouseFormattedValue(filter.Value)))
+			whereClauses = append(whereClauses, fmt.Sprintf("%s IN (%s)", "messaging_system", format.ClickHouseFormattedValue(filter.Value, false)))
 		case "kind_string":
-			whereClauses = append(whereClauses, fmt.Sprintf("%s IN (%s)", "kind_string", format.ClickHouseFormattedValue(filter.Value)))
+			whereClauses = append(whereClauses, fmt.Sprintf("%s IN (%s)", "kind_string", format.ClickHouseFormattedValue(filter.Value, false)))
 		}
 	}

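Every call site in this diff now passes an explicit escaping flag. Inferring the contract from the call sites (the helper's body is only partially shown at the end of this diff): with isEscaped false the helper quote-escapes raw string values before quoting them; with isEscaped true the caller guarantees the value is already escaped. A toy model of that contract, not the actual utils.ClickHouseFormattedValue body:

package main

import (
	"fmt"
	"strings"
)

// Toy model only: string handling of the isEscaped flag.
func formattedValue(v string, isEscaped bool) string {
	if !isEscaped {
		v = strings.NewReplacer(`\`, `\\`, `'`, `\'`).Replace(v)
	}
	return "'" + v + "'"
}

func main() {
	fmt.Println(formattedValue(`it's`, false)) // 'it\'s' — escaped here
	fmt.Println(formattedValue(`it\'s`, true)) // 'it\'s' — trusted as-is
}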
@@ -131,7 +131,7 @@ func GetPathIndexFilter(path string) string {
 	return ""
 }

-func GetJSONFilter(item v3.FilterItem) (string, error) {
+func GetJSONFilter(item v3.FilterItem, isEscaped bool) (string, error) {

 	dataType := item.Key.DataType
 	isArray := false
@@ -166,13 +166,13 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
 	case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
 		filter = fmt.Sprintf(logsOp, key, GetPath(strings.Split(item.Key.Key, ".")[1:]))
 	case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex, v3.FilterOperatorHas, v3.FilterOperatorNotHas:
-		fmtVal := utils.ClickHouseFormattedValue(value)
+		fmtVal := utils.ClickHouseFormattedValue(value, isEscaped)
 		filter = fmt.Sprintf(logsOp, key, fmtVal)
 	case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
 		val := utils.QuoteEscapedString(fmt.Sprintf("%v", item.Value))
 		filter = fmt.Sprintf("%s %s '%%%s%%'", key, logsOp, val)
 	default:
-		fmtVal := utils.ClickHouseFormattedValue(value)
+		fmtVal := utils.ClickHouseFormattedValue(value, isEscaped)
 		filter = fmt.Sprintf("%s %s %s", key, logsOp, fmtVal)
 	}
 } else {
@@ -331,7 +331,7 @@ var testGetJSONFilterData = []struct {
 func TestGetJSONFilter(t *testing.T) {
 	for _, tt := range testGetJSONFilterData {
 		Convey("testGetJSONFilter", t, func() {
-			filter, err := GetJSONFilter(tt.FilterItem)
+			filter, err := GetJSONFilter(tt.FilterItem, false)
 			if tt.Error {
 				So(err, ShouldNotBeNil)
 			} else {
@@ -168,7 +168,7 @@ func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey,
 	if fs != nil && len(fs.Items) != 0 {
 		for _, item := range fs.Items {
 			if item.Key.IsJSON {
-				filter, err := GetJSONFilter(item)
+				filter, err := GetJSONFilter(item, false)
 				if err != nil {
 					return "", err
 				}
@@ -193,7 +193,7 @@ func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey,
 			conditions = append(conditions, GetExistsNexistsFilter(op, item))
 		case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex:
 			columnName := getClickhouseColumnName(item.Key)
-			fmtVal := utils.ClickHouseFormattedValue(value)
+			fmtVal := utils.ClickHouseFormattedValue(value, false)
 			conditions = append(conditions, fmt.Sprintf(logsOp, columnName, fmtVal))
 		case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
 			columnName := getClickhouseColumnName(item.Key)
@@ -206,7 +206,7 @@ func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey,
 			}
 		default:
 			columnName := getClickhouseColumnName(item.Key)
-			fmtVal := utils.ClickHouseFormattedValue(value)
+			fmtVal := utils.ClickHouseFormattedValue(value, false)

 			// for use lower for like and ilike
 			if op == v3.FilterOperatorLike || op == v3.FilterOperatorNotLike {
@@ -444,7 +444,7 @@ func Having(items []v3.Having) string {
 	// aggregate something and filter on that aggregate
 	var having []string
 	for _, item := range items {
-		having = append(having, fmt.Sprintf("value %s %s", item.Operator, utils.ClickHouseFormattedValue(item.Value)))
+		having = append(having, fmt.Sprintf("value %s %s", item.Operator, utils.ClickHouseFormattedValue(item.Value, false)))
 	}
 	return strings.Join(having, " AND ")
 }
@@ -30,7 +30,7 @@ var jsonLogOperators = map[v3.FilterOperator]string{
 	v3.FilterOperatorNotHas: "NOT has(%s, %s)",
 }

-func GetJSONFilter(item v3.FilterItem) (string, error) {
+func GetJSONFilter(item v3.FilterItem, isEscaped bool) (string, error) {

 	dataType := item.Key.DataType
 	isArray := false
@@ -65,13 +65,13 @@ func GetJSONFilter(item v3.FilterItem) (string, error) {
 	case v3.FilterOperatorExists, v3.FilterOperatorNotExists:
 		filter = fmt.Sprintf(logsOp, key, logsV3.GetPath(strings.Split(item.Key.Key, ".")[1:]))
 	case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex, v3.FilterOperatorHas, v3.FilterOperatorNotHas:
-		fmtVal := utils.ClickHouseFormattedValue(value)
+		fmtVal := utils.ClickHouseFormattedValue(value, isEscaped)
 		filter = fmt.Sprintf(logsOp, key, fmtVal)
 	case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
 		val := utils.QuoteEscapedString(fmt.Sprintf("%v", item.Value))
 		filter = fmt.Sprintf("%s %s '%%%s%%'", key, logsOp, val)
 	default:
-		fmtVal := utils.ClickHouseFormattedValue(value)
+		fmtVal := utils.ClickHouseFormattedValue(value, isEscaped)
 		filter = fmt.Sprintf("%s %s %s", key, logsOp, fmtVal)
 	}
 } else {
@@ -253,7 +253,7 @@ var testGetJSONFilterData = []struct {
 func TestGetJSONFilter(t *testing.T) {
 	for _, tt := range testGetJSONFilterData {
 		Convey("testGetJSONFilter", t, func() {
-			filter, err := GetJSONFilter(tt.FilterItem)
+			filter, err := GetJSONFilter(tt.FilterItem, false)
 			if tt.Error {
 				So(err, ShouldNotBeNil)
 			} else {
@@ -113,7 +113,7 @@ func getExistsNexistsFilter(op v3.FilterOperator, item v3.FilterItem) string {
 	return fmt.Sprintf(logOperators[op], columnType, columnDataType, item.Key.Key)
 }

-func buildAttributeFilter(item v3.FilterItem) (string, error) {
+func buildAttributeFilter(item v3.FilterItem, isEscaped bool) (string, error) {
 	// check if the user is searching for value in all attributes
 	key := item.Key.Key
 	op := v3.FilterOperator(strings.ToLower(string(item.Operator)))
@@ -133,12 +133,12 @@ func buildAttributeFilter(item v3.FilterItem) (string, error) {
 		if (op != v3.FilterOperatorEqual && op != v3.FilterOperatorContains) || item.Key.DataType != v3.AttributeKeyDataTypeString {
 			return "", fmt.Errorf("only = operator and string data type is supported for __attrs")
 		}
-		val := utils.ClickHouseFormattedValue(item.Value)
+		val := utils.ClickHouseFormattedValue(item.Value, isEscaped)
 		return fmt.Sprintf("has(mapValues(attributes_string), %s)", val), nil
 	}

 	keyName := getClickhouseKey(item.Key)
-	fmtVal := utils.ClickHouseFormattedValue(value)
+	fmtVal := utils.ClickHouseFormattedValue(value, isEscaped)

 	if logsOp, ok := logOperators[op]; ok {
 		switch op {
@@ -148,8 +148,16 @@ func buildAttributeFilter(item v3.FilterItem) (string, error) {

 			return fmt.Sprintf(logsOp, keyName, fmtVal), nil
 		case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
-			// we also want to treat %, _ as literals for contains
-			val := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", item.Value), false)
+			var val string
+			if !isEscaped {
+				val = utils.QuoteEscapedString(fmt.Sprintf("%s", item.Value))
+			} else {
+				val = fmt.Sprintf("%s", item.Value)
+			}
+
+			// we want to treat %, _ as literals for contains
+			val = utils.EscapedStringForContains(val, false)

 			// for body the contains is case insensitive
 			if keyName == BODY {
 				logsOp = strings.Replace(logsOp, "ILIKE", "LIKE", 1) // removing i from ilike and not ilike
@@ -159,7 +167,12 @@ func buildAttributeFilter(item v3.FilterItem) (string, error) {
 			}
 		case v3.FilterOperatorLike, v3.FilterOperatorNotLike:
 			// for body use lower for like and ilike
-			val := utils.QuoteEscapedString(fmt.Sprintf("%s", item.Value))
+			var val string
+			if !isEscaped {
+				val = utils.QuoteEscapedString(fmt.Sprintf("%s", item.Value))
+			} else {
+				val = fmt.Sprintf("%s", item.Value)
+			}
 			if keyName == BODY {
 				logsOp = strings.Replace(logsOp, "ILIKE", "LIKE", 1) // removing i from ilike and not ilike
 				return fmt.Sprintf("lower(%s) %s lower('%s')", keyName, logsOp, val), nil
@@ -174,7 +187,7 @@ func buildAttributeFilter(item v3.FilterItem) (string, error) {
 	}
 }

-func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey) (string, error) {
+func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey, isEscaped bool) (string, error) {
 	var conditions []string

 	if fs == nil || len(fs.Items) == 0 {
@@ -189,7 +202,7 @@ func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey,

 		// if the filter is json filter
 		if item.Key.IsJSON {
-			filter, err := GetJSONFilter(item)
+			filter, err := GetJSONFilter(item, isEscaped)
 			if err != nil {
 				return "", err
 			}
@@ -198,7 +211,7 @@ func buildLogsTimeSeriesFilterQuery(fs *v3.FilterSet, groupBy []v3.AttributeKey,
 		}

 		// generate the filter
-		filter, err := buildAttributeFilter(item)
+		filter, err := buildAttributeFilter(item, isEscaped)
 		if err != nil {
 			return "", err
 		}
@@ -342,7 +355,7 @@ func generateAggregateClause(aggOp v3.AggregateOperator,
 	}
 }

-func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.BuilderQuery, graphLimitQtype string) (string, error) {
+func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.BuilderQuery, graphLimitQtype string, isEscaped bool) (string, error) {
 	// timerange will be sent in epoch millisecond
 	logsStart := utils.GetEpochNanoSecs(start)
 	logsEnd := utils.GetEpochNanoSecs(end)
@@ -355,7 +368,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
 	timeFilter := fmt.Sprintf("(timestamp >= %d AND timestamp <= %d) AND (ts_bucket_start >= %d AND ts_bucket_start <= %d)", logsStart, logsEnd, bucketStart, bucketEnd)

 	// build the where clause for main table
-	filterSubQuery, err := buildLogsTimeSeriesFilterQuery(mq.Filters, mq.GroupBy, mq.AggregateAttribute)
+	filterSubQuery, err := buildLogsTimeSeriesFilterQuery(mq.Filters, mq.GroupBy, mq.AggregateAttribute, isEscaped)
 	if err != nil {
 		return "", err
 	}
@@ -364,7 +377,7 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
 	}

 	// build the where clause for resource table
-	resourceSubQuery, err := resource.BuildResourceSubQuery(DB_NAME, DISTRIBUTED_LOGS_V2_RESOURCE, bucketStart, bucketEnd, mq.Filters, mq.GroupBy, mq.AggregateAttribute, false)
+	resourceSubQuery, err := resource.BuildResourceSubQuery(DB_NAME, DISTRIBUTED_LOGS_V2_RESOURCE, bucketStart, bucketEnd, mq.Filters, mq.GroupBy, mq.AggregateAttribute, false, isEscaped)
 	if err != nil {
 		return "", err
 	}
@@ -446,14 +459,14 @@ func buildLogsQuery(panelType v3.PanelType, start, end, step int64, mq *v3.Build
 	return query, nil
 }

-func buildLogsLiveTailQuery(mq *v3.BuilderQuery) (string, error) {
-	filterSubQuery, err := buildLogsTimeSeriesFilterQuery(mq.Filters, mq.GroupBy, v3.AttributeKey{})
+func buildLogsLiveTailQuery(mq *v3.BuilderQuery, isEscaped bool) (string, error) {
+	filterSubQuery, err := buildLogsTimeSeriesFilterQuery(mq.Filters, mq.GroupBy, v3.AttributeKey{}, isEscaped)
 	if err != nil {
 		return "", err
 	}

 	// no values for bucket start and end
-	resourceSubQuery, err := resource.BuildResourceSubQuery(DB_NAME, DISTRIBUTED_LOGS_V2_RESOURCE, 0, 0, mq.Filters, mq.GroupBy, mq.AggregateAttribute, true)
+	resourceSubQuery, err := resource.BuildResourceSubQuery(DB_NAME, DISTRIBUTED_LOGS_V2_RESOURCE, 0, 0, mq.Filters, mq.GroupBy, mq.AggregateAttribute, true, isEscaped)
 	if err != nil {
 		return "", err
 	}
@@ -491,14 +504,14 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan
 	// }

 	if options.IsLivetailQuery {
-		query, err := buildLogsLiveTailQuery(mq)
+		query, err := buildLogsLiveTailQuery(mq, options.ValuesEscaped)
 		if err != nil {
 			return "", err
 		}
 		return query, nil
 	} else if options.GraphLimitQtype == constants.FirstQueryGraphLimit {
 		// give me just the group_by names (no values)
-		query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype)
+		query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype, options.ValuesEscaped)
 		if err != nil {
 			return "", err
 		}
@@ -506,14 +519,14 @@ func PrepareLogsQuery(start, end int64, queryType v3.QueryType, panelType v3.Pan

 		return query, nil
 	} else if options.GraphLimitQtype == constants.SecondQueryGraphLimit {
-		query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype)
+		query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype, options.ValuesEscaped)
 		if err != nil {
 			return "", err
 		}
 		return query, nil
 	}

-	query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype)
+	query, err := buildLogsQuery(panelType, start, end, mq.StepInterval, mq, options.GraphLimitQtype, options.ValuesEscaped)
 	if err != nil {
 		return "", err
 	}
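The rename from QuoteEscapedStringForContains to EscapedStringForContains (see the utils hunk near the end of this diff) splits quote-escaping from LIKE-wildcard escaping, which is what makes the per-operator branches above possible. A rough model of the wildcard-escaping half, assuming it simply backslash-escapes % and _ so they match literally; this is not the real body:

package main

import (
	"fmt"
	"strings"
)

// Rough model of EscapedStringForContains. isIndex additionally doubles up
// quote escaping for label-index lookups (see the index tests further down).
func escapedStringForContains(str string, isIndex bool) string {
	if isIndex {
		str = strings.ReplaceAll(str, `"`, `\\\\"`) // `"` is stored as `\"` in labels
	}
	str = strings.ReplaceAll(str, `%`, `\%`)
	str = strings.ReplaceAll(str, `_`, `\_`)
	return str
}

func main() {
	fmt.Println(escapedStringForContains("wo_rld 100%", false)) // wo\_rld 100\%
}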
@@ -169,7 +169,8 @@ func Test_getExistsNexistsFilter(t *testing.T) {

 func Test_buildAttributeFilter(t *testing.T) {
 	type args struct {
-		item v3.FilterItem
+		item      v3.FilterItem
+		isEscaped bool
 	}
 	tests := []struct {
 		name string
@@ -297,10 +298,42 @@ func Test_buildAttributeFilter(t *testing.T) {
 			},
 			want: "lower(body) LIKE lower('test')",
 		},
+		{
+			name: "build attribute filter contains- body escaped",
+			args: args{
+				item: v3.FilterItem{
+					Key: v3.AttributeKey{
+						Key:      "body",
+						DataType: v3.AttributeKeyDataTypeString,
+						IsColumn: true,
+					},
+					Operator: v3.FilterOperatorContains,
+					Value:    `{\\"hello\\": \\"wo_rld\\"}`,
+				},
+				isEscaped: true,
+			},
+			want: `lower(body) LIKE lower('%{\\"hello\\": \\"wo\_rld\\"}%')`,
+		},
+		{
+			name: "build attribute filter eq- body escaped",
+			args: args{
+				item: v3.FilterItem{
+					Key: v3.AttributeKey{
+						Key:      "body",
+						DataType: v3.AttributeKeyDataTypeString,
+						IsColumn: true,
+					},
+					Operator: v3.FilterOperatorEqual,
+					Value:    `{\\"hello\\": \\"wo_rld\\"}`,
+				},
+				isEscaped: true,
+			},
+			want: `body = '{\\"hello\\": \\"wo_rld\\"}'`,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := buildAttributeFilter(tt.args.item)
+			got, err := buildAttributeFilter(tt.args.item, tt.args.isEscaped)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("buildAttributeFilter() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -317,6 +350,7 @@ func Test_buildLogsTimeSeriesFilterQuery(t *testing.T) {
 		fs                 *v3.FilterSet
 		groupBy            []v3.AttributeKey
 		aggregateAttribute v3.AttributeKey
+		isEscaped          bool
 	}
 	tests := []struct {
 		name string
@@ -436,7 +470,7 @@ func Test_buildLogsTimeSeriesFilterQuery(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := buildLogsTimeSeriesFilterQuery(tt.args.fs, tt.args.groupBy, tt.args.aggregateAttribute)
+			got, err := buildLogsTimeSeriesFilterQuery(tt.args.fs, tt.args.groupBy, tt.args.aggregateAttribute, tt.args.isEscaped)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("buildLogsTimeSeriesFilterQuery() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -641,6 +675,7 @@ func Test_buildLogsQuery(t *testing.T) {
 		step            int64
 		mq              *v3.BuilderQuery
 		graphLimitQtype string
+		isEscaped       bool
 	}
 	tests := []struct {
 		name string
@@ -785,7 +820,7 @@ func Test_buildLogsQuery(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := buildLogsQuery(tt.args.panelType, tt.args.start, tt.args.end, tt.args.step, tt.args.mq, tt.args.graphLimitQtype)
+			got, err := buildLogsQuery(tt.args.panelType, tt.args.start, tt.args.end, tt.args.step, tt.args.mq, tt.args.graphLimitQtype, tt.args.isEscaped)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("buildLogsQuery() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -296,7 +296,7 @@ func orderByAttributeKeyTags(items []v3.OrderBy, tags []v3.AttributeKey) string
 func having(items []v3.Having) string {
 	var having []string
 	for _, item := range items {
-		having = append(having, fmt.Sprintf("%s %s %v", "value", item.Operator, utils.ClickHouseFormattedValue(item.Value)))
+		having = append(having, fmt.Sprintf("%s %s %v", "value", item.Operator, utils.ClickHouseFormattedValue(item.Value, false)))
 	}
 	return strings.Join(having, " AND ")
 }
@@ -282,7 +282,7 @@ func PrepareTimeseriesFilterQuery(start, end int64, mq *v3.BuilderQuery) (string
 	}
 	var fmtVal string
 	if op != v3.FilterOperatorExists && op != v3.FilterOperatorNotExists {
-		fmtVal = utils.ClickHouseFormattedValue(toFormat)
+		fmtVal = utils.ClickHouseFormattedValue(toFormat, false)
 	}
 	switch op {
 	case v3.FilterOperatorEqual:
@@ -364,7 +364,7 @@ func PrepareTimeseriesFilterQueryV3(start, end int64, mq *v3.BuilderQuery) (stri
 	if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains {
 		toFormat = fmt.Sprintf("%%%s%%", toFormat)
 	}
-	fmtVal := utils.ClickHouseFormattedValue(toFormat)
+	fmtVal := utils.ClickHouseFormattedValue(toFormat, false)
 	switch op {
 	case v3.FilterOperatorEqual:
 		conditions = append(conditions, fmt.Sprintf("JSONExtractString(labels, '%s') = %s", item.Key.Key, fmtVal))
@@ -889,7 +889,7 @@ func ParseQueryRangeParams(r *http.Request) (*v3.QueryRangeParamsV3, *model.ApiE
 		if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypePromQL {
 			formattedVars[name] = metrics.PromFormattedValue(value)
 		} else if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeClickHouseSQL {
-			formattedVars[name] = utils.ClickHouseFormattedValue(value)
+			formattedVars[name] = utils.ClickHouseFormattedValue(value, queryRangeParams.ValuesEscaped)
 		}
 	}

@@ -45,7 +45,7 @@ func prepareLogsQuery(_ context.Context,
 		params.CompositeQuery.QueryType,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit},
+		v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		return query, err
@@ -56,7 +56,7 @@ func prepareLogsQuery(_ context.Context,
 		params.CompositeQuery.QueryType,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit},
+		v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		return query, err
@@ -71,7 +71,7 @@ func prepareLogsQuery(_ context.Context,
 		params.CompositeQuery.QueryType,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{},
+		v3.QBOptions{ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		return query, err
@@ -184,7 +184,7 @@ func (q *querier) runBuilderQuery(
 		end,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit},
+		v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
@@ -195,7 +195,7 @@ func (q *querier) runBuilderQuery(
 		end,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit},
+		v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
@@ -208,7 +208,7 @@ func (q *querier) runBuilderQuery(
 		end,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{},
+		v3.QBOptions{ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
@@ -43,6 +43,7 @@ type querier struct {
 	fluxInterval time.Duration

 	builder *queryBuilder.QueryBuilder

 	// used for testing
 	// TODO(srikanthccv): remove this once we have a proper mock
 	testingMode bool
@@ -44,7 +44,7 @@ func prepareLogsQuery(_ context.Context,
 		params.CompositeQuery.QueryType,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit},
+		v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		return query, err
@@ -55,7 +55,7 @@ func prepareLogsQuery(_ context.Context,
 		params.CompositeQuery.QueryType,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit},
+		v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		return query, err
@@ -70,7 +70,7 @@ func prepareLogsQuery(_ context.Context,
 		params.CompositeQuery.QueryType,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{},
+		v3.QBOptions{ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		return query, err
@@ -184,7 +184,7 @@ func (q *querier) runBuilderQuery(
 		end,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit},
+		v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
@@ -195,7 +195,7 @@ func (q *querier) runBuilderQuery(
 		end,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit},
+		v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		ch <- channelResult{Err: err, Name: queryName, Query: limitQuery, Series: nil}
@@ -208,7 +208,7 @@ func (q *querier) runBuilderQuery(
 		end,
 		params.CompositeQuery.PanelType,
 		builderQuery,
-		v3.QBOptions{},
+		v3.QBOptions{ValuesEscaped: params.ValuesEscaped},
 	)
 	if err != nil {
 		ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
@@ -8,6 +8,7 @@ import (
 	metricsV3 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v3"
 	"github.com/SigNoz/signoz/pkg/query-service/cache"
 	"github.com/SigNoz/signoz/pkg/query-service/constants"
+	"github.com/SigNoz/signoz/pkg/query-service/interfaces"
 	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
 	"go.uber.org/zap"
 )
@@ -45,7 +46,8 @@ type prepareLogsQueryFunc func(start, end int64, queryType v3.QueryType, panelTy
 type prepareMetricQueryFunc func(start, end int64, queryType v3.QueryType, panelType v3.PanelType, bq *v3.BuilderQuery, options metricsV3.Options) (string, error)

 type QueryBuilder struct {
-	options QueryBuilderOptions
+	options      QueryBuilderOptions
+	featureFlags interfaces.FeatureLookup
 }

 type QueryBuilderOptions struct {
@@ -191,12 +193,12 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3) (map[strin
 	// for ts query with group by and limit form two queries
 	if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
 		limitQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType, query,
-			v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit})
+			v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, ValuesEscaped: params.ValuesEscaped})
 		if err != nil {
 			return nil, err
 		}
 		placeholderQuery, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType,
-			query, v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit})
+			query, v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, ValuesEscaped: params.ValuesEscaped})
 		if err != nil {
 			return nil, err
 		}
@@ -204,7 +206,7 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3) (map[strin
 		queries[queryName] = query
 	} else {
 		queryString, err := qb.options.BuildTraceQuery(start, end, compositeQuery.PanelType,
-			query, v3.QBOptions{GraphLimitQtype: ""})
+			query, v3.QBOptions{GraphLimitQtype: "", ValuesEscaped: params.ValuesEscaped})
 		if err != nil {
 			return nil, err
 		}
@@ -213,18 +215,18 @@ func (qb *QueryBuilder) PrepareQueries(params *v3.QueryRangeParamsV3) (map[strin
 	case v3.DataSourceLogs:
 		// for ts query with limit replace it as it is already formed
 		if compositeQuery.PanelType == v3.PanelTypeGraph && query.Limit > 0 && len(query.GroupBy) > 0 {
-			limitQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit})
+			limitQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.QBOptions{GraphLimitQtype: constants.FirstQueryGraphLimit, ValuesEscaped: params.ValuesEscaped})
 			if err != nil {
 				return nil, err
 			}
-			placeholderQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit})
+			placeholderQuery, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.QBOptions{GraphLimitQtype: constants.SecondQueryGraphLimit, ValuesEscaped: params.ValuesEscaped})
 			if err != nil {
 				return nil, err
 			}
 			query := fmt.Sprintf(placeholderQuery, limitQuery)
 			queries[queryName] = query
 		} else {
-			queryString, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.QBOptions{GraphLimitQtype: ""})
+			queryString, err := qb.options.BuildLogQuery(start, end, compositeQuery.QueryType, compositeQuery.PanelType, query, v3.QBOptions{GraphLimitQtype: "", ValuesEscaped: params.ValuesEscaped})
 			if err != nil {
 				return nil, err
 			}
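The pattern across all of these hunks is the same: the request-level ValuesEscaped flag rides along in every QBOptions so each sub-builder escapes exactly once. A small sketch of that hand-off, using the types and constants this diff adds:

package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/query-service/constants"
	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
)

func main() {
	// e.g. parsed from a request body containing {"valuesEscaped": true}
	params := v3.QueryRangeParamsV3{ValuesEscaped: true}

	opts := v3.QBOptions{
		GraphLimitQtype: constants.FirstQueryGraphLimit,
		ValuesEscaped:   params.ValuesEscaped, // forwarded, never recomputed
	}
	fmt.Println(opts.ValuesEscaped)
}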
@@ -28,14 +28,14 @@ var resourceLogOperators = map[v3.FilterOperator]string{
 }

 // buildResourceFilter builds a clickhouse filter string for resource labels
-func buildResourceFilter(logsOp string, key string, op v3.FilterOperator, value interface{}) string {
+func buildResourceFilter(logsOp string, key string, op v3.FilterOperator, value interface{}, isEscaped bool) string {
 	// for all operators except contains and like
 	searchKey := fmt.Sprintf("simpleJSONExtractString(labels, '%s')", key)

 	// for contains and like it will be case insensitive
 	lowerSearchKey := fmt.Sprintf("simpleJSONExtractString(lower(labels), '%s')", key)

-	chFmtVal := utils.ClickHouseFormattedValue(value)
+	chFmtVal := utils.ClickHouseFormattedValue(value, isEscaped)

 	lowerValue := strings.ToLower(fmt.Sprintf("%s", value))

@@ -47,14 +47,25 @@ func buildResourceFilter(logsOp string, key string, op v3.FilterOperator, value
 	case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex:
 		return fmt.Sprintf(logsOp, searchKey, chFmtVal)
 	case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
 		// this is required as clickhouseFormattedValue add's quotes to the string
+
+		var val string
+		if !isEscaped {
+			val = utils.QuoteEscapedString(lowerValue)
+		} else {
+			val = lowerValue
+		}
+
 		// we also want to treat %, _ as literals for contains
-		escapedStringValue := utils.QuoteEscapedStringForContains(lowerValue, false)
-		return fmt.Sprintf("%s %s '%%%s%%'", lowerSearchKey, logsOp, escapedStringValue)
+		val = utils.EscapedStringForContains(val, false)
+		return fmt.Sprintf("%s %s '%%%s%%'", lowerSearchKey, logsOp, val)
 	case v3.FilterOperatorLike, v3.FilterOperatorNotLike:
 		// this is required as clickhouseFormattedValue add's quotes to the string
-		escapedStringValue := utils.QuoteEscapedString(lowerValue)
-		return fmt.Sprintf("%s %s '%s'", lowerSearchKey, logsOp, escapedStringValue)
+		var val string
+		if !isEscaped {
+			val = utils.QuoteEscapedString(lowerValue)
+		} else {
+			val = lowerValue
+		}
+		return fmt.Sprintf("%s %s '%s'", lowerSearchKey, logsOp, val)
 	default:
 		return fmt.Sprintf("%s %s %s", searchKey, logsOp, chFmtVal)
 	}
@@ -63,7 +74,7 @@ func buildResourceFilter(logsOp string, key string, op v3.FilterOperator, value
 // buildIndexFilterForInOperator builds a clickhouse filter string for in operator
 // example:= x in a,b,c = (labels like '%"x"%"a"%' or labels like '%"x":"b"%' or labels like '%"x"="c"%')
 // example:= x nin a,b,c = (labels nlike '%"x"%"a"%' AND labels nlike '%"x"="b"' AND labels nlike '%"x"="c"%')
-func buildIndexFilterForInOperator(key string, op v3.FilterOperator, value interface{}) string {
+func buildIndexFilterForInOperator(key string, op v3.FilterOperator, value interface{}, isEscaped bool) string {
 	conditions := []string{}
 	separator := " OR "
 	sqlOp := "like"
@@ -92,8 +103,18 @@ func buildIndexFilterForInOperator(key string, op v3.FilterOperator, value inter
 	// if there are no values to filter on, return an empty string
 	if len(values) > 0 {
 		for _, v := range values {
-			value := utils.QuoteEscapedStringForContains(v, true)
-			conditions = append(conditions, fmt.Sprintf("labels %s '%%\"%s\":\"%s\"%%'", sqlOp, key, value))
+
+			var val string
+			if !isEscaped {
+				val = utils.QuoteEscapedString(v)
+			} else {
+				val = v
+			}
+
+			// we also want to treat %, _ as literals for contains
+			val = utils.EscapedStringForContains(val, true)
+
+			conditions = append(conditions, fmt.Sprintf("labels %s '%%\"%s\":\"%s\"%%'", sqlOp, key, val))
 		}
 		return "(" + strings.Join(conditions, separator) + ")"
 	}
@@ -107,10 +128,18 @@ func buildIndexFilterForInOperator(key string, op v3.FilterOperator, value inter
 // for like/contains we will use lower index
 // we can use lower index for =, in etc but it's difficult to do it for !=, NIN etc
 // if as x != "ABC" we cannot predict something like "not lower(labels) like '%%x%%abc%%'". It has it be "not lower(labels) like '%%x%%ABC%%'"
-func buildResourceIndexFilter(key string, op v3.FilterOperator, value interface{}) string {
+func buildResourceIndexFilter(key string, op v3.FilterOperator, value interface{}, isEscaped bool) string {
 	// not using clickhouseFormattedValue as we don't wan't the quotes
 	strVal := fmt.Sprintf("%s", value)
-	fmtValEscapedForContains := utils.QuoteEscapedStringForContains(strVal, true)
+
+	var fmtValEscapedForContains string
+	if !isEscaped {
+		fmtValEscapedForContains = utils.QuoteEscapedString(strVal)
+	} else {
+		fmtValEscapedForContains = strVal
+	}
+
+	fmtValEscapedForContains = utils.EscapedStringForContains(fmtValEscapedForContains, true)
 	fmtValEscapedForContainsLower := strings.ToLower(fmtValEscapedForContains)
 	fmtValEscapedLower := strings.ToLower(utils.QuoteEscapedString(strVal))

@@ -132,7 +161,7 @@ func buildResourceIndexFilter(key string, op v3.FilterOperator, value interface{
 		// don't try to do anything for regex.
 		return ""
 	case v3.FilterOperatorIn, v3.FilterOperatorNotIn:
-		return buildIndexFilterForInOperator(key, op, value)
+		return buildIndexFilterForInOperator(key, op, value, isEscaped)
 	default:
 		return fmt.Sprintf("labels like '%%%s%%'", key)
 	}
@@ -140,7 +169,7 @@ func buildResourceIndexFilter(key string, op v3.FilterOperator, value interface{

 // buildResourceFiltersFromFilterItems builds a list of clickhouse filter strings for resource labels from a FilterSet.
 // It skips any filter items that are not resource attributes and checks that the operator is supported and the data type is correct.
-func buildResourceFiltersFromFilterItems(fs *v3.FilterSet) ([]string, error) {
+func buildResourceFiltersFromFilterItems(fs *v3.FilterSet, isEscaped bool) ([]string, error) {
 	var conditions []string
 	if fs == nil || len(fs.Items) == 0 {
 		return nil, nil
@@ -175,11 +204,11 @@ func buildResourceFiltersFromFilterItems(fs *v3.FilterSet) ([]string, error) {

 		if logsOp, ok := resourceLogOperators[op]; ok {
 			// the filter
-			if resourceFilter := buildResourceFilter(logsOp, keyName, op, value); resourceFilter != "" {
+			if resourceFilter := buildResourceFilter(logsOp, keyName, op, value, isEscaped); resourceFilter != "" {
 				conditions = append(conditions, resourceFilter)
 			}
 			// the additional filter for better usage of the index
-			if resourceIndexFilter := buildResourceIndexFilter(keyName, op, value); resourceIndexFilter != "" {
+			if resourceIndexFilter := buildResourceIndexFilter(keyName, op, value, isEscaped); resourceIndexFilter != "" {
 				conditions = append(conditions, resourceIndexFilter)
 			}
 		} else {
@@ -211,12 +240,12 @@ func buildResourceFiltersFromAggregateAttribute(aggregateAttribute v3.AttributeK
 	return ""
 }

-func BuildResourceSubQuery(dbName, tableName string, bucketStart, bucketEnd int64, fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey, isLiveTail bool) (string, error) {
+func BuildResourceSubQuery(dbName, tableName string, bucketStart, bucketEnd int64, fs *v3.FilterSet, groupBy []v3.AttributeKey, aggregateAttribute v3.AttributeKey, isLiveTail bool, isEscaped bool) (string, error) {

 	// BUILD THE WHERE CLAUSE
 	var conditions []string
 	// only add the resource attributes to the filters here
-	rs, err := buildResourceFiltersFromFilterItems(fs)
+	rs, err := buildResourceFiltersFromFilterItems(fs, isEscaped)
 	if err != nil {
 		return "", err
 	}
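The shape of the index filter is easiest to see from the function comment above. A self-contained sketch that reproduces the documented output for x IN (a, b); the names are illustrative, not the real helper:

package main

import (
	"fmt"
	"strings"
)

func indexFilterForIn(key string, values []string) string {
	conds := make([]string, 0, len(values))
	for _, v := range values {
		conds = append(conds, fmt.Sprintf(`labels like '%%"%s":"%s"%%'`, key, v))
	}
	return "(" + strings.Join(conds, " OR ") + ")"
}

func main() {
	fmt.Println(indexFilterForIn("service.name", []string{"api", "worker"}))
	// (labels like '%"service.name":"api"%' OR labels like '%"service.name":"worker"%')
}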
@@ -9,10 +9,11 @@ import (

 func Test_buildResourceFilter(t *testing.T) {
 	type args struct {
-		logsOp string
-		key    string
-		op     v3.FilterOperator
-		value  interface{}
+		logsOp    string
+		key       string
+		op        v3.FilterOperator
+		value     interface{}
+		isEscaped bool
 	}
 	tests := []struct {
 		name string
@@ -88,7 +89,7 @@ func Test_buildResourceFilter(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := buildResourceFilter(tt.args.logsOp, tt.args.key, tt.args.op, tt.args.value); got != tt.want {
+			if got := buildResourceFilter(tt.args.logsOp, tt.args.key, tt.args.op, tt.args.value, tt.args.isEscaped); got != tt.want {
 				t.Errorf("buildResourceFilter() = %v, want %v", got, tt.want)
 			}
 		})
@@ -97,9 +98,10 @@ func Test_buildResourceFilter(t *testing.T) {

 func Test_buildIndexFilterForInOperator(t *testing.T) {
 	type args struct {
-		key   string
-		op    v3.FilterOperator
-		value interface{}
+		key       string
+		op        v3.FilterOperator
+		value     interface{}
+		isEscaped bool
 	}
 	tests := []struct {
 		name string
@@ -142,10 +144,20 @@ func Test_buildIndexFilterForInOperator(t *testing.T) {
 			},
 			want: `(labels not like '%"service.name":"application\'\\\\"\_s"%')`,
 		},
+		{
+			name: "test nin string with escaped quotes",
+			args: args{
+				key:       "service.name",
+				op:        v3.FilterOperatorNotIn,
+				value:     `application\'"_s`,
+				isEscaped: true,
+			},
+			want: `(labels not like '%"service.name":"application\'\\\\"\_s"%')`,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := buildIndexFilterForInOperator(tt.args.key, tt.args.op, tt.args.value); got != tt.want {
+			if got := buildIndexFilterForInOperator(tt.args.key, tt.args.op, tt.args.value, tt.args.isEscaped); got != tt.want {
 				t.Errorf("buildIndexFilterForInOperator() = %v, want %v", got, tt.want)
 			}
 		})
@@ -154,9 +166,10 @@ func Test_buildIndexFilterForInOperator(t *testing.T) {

 func Test_buildResourceIndexFilter(t *testing.T) {
 	type args struct {
-		key   string
-		op    v3.FilterOperator
-		value interface{}
+		key       string
+		op        v3.FilterOperator
+		value     interface{}
+		isEscaped bool
 	}
 	tests := []struct {
 		name string
@@ -235,10 +248,20 @@ func Test_buildResourceIndexFilter(t *testing.T) {
 			},
 			want: `labels like '%service.name%Application\\\\"%'`,
 		},
+		{
+			name: "test eq with escaped quotes",
+			args: args{
+				key:       "service.name",
+				op:        v3.FilterOperatorEqual,
+				value:     `App\\lication"`,
+				isEscaped: true,
+			},
+			want: `labels like '%service.name%App\\lication\\\\"%'`,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if got := buildResourceIndexFilter(tt.args.key, tt.args.op, tt.args.value); got != tt.want {
+			if got := buildResourceIndexFilter(tt.args.key, tt.args.op, tt.args.value, tt.args.isEscaped); got != tt.want {
 				t.Errorf("buildResourceIndexFilter() = %v, want %v", got, tt.want)
 			}
 		})
@@ -247,7 +270,8 @@ func Test_buildResourceIndexFilter(t *testing.T) {

 func Test_buildResourceFiltersFromFilterItems(t *testing.T) {
 	type args struct {
-		fs *v3.FilterSet
+		fs        *v3.FilterSet
+		isEscaped bool
 	}
 	tests := []struct {
 		name string
@@ -335,7 +359,7 @@ func Test_buildResourceFiltersFromFilterItems(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := buildResourceFiltersFromFilterItems(tt.args.fs)
+			got, err := buildResourceFiltersFromFilterItems(tt.args.fs, tt.args.isEscaped)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("buildResourceFiltersFromFilterItems() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -439,6 +463,7 @@ func Test_buildResourceSubQuery(t *testing.T) {
 		fs                 *v3.FilterSet
 		groupBy            []v3.AttributeKey
 		aggregateAttribute v3.AttributeKey
		isEscaped          bool
 	}
 	tests := []struct {
 		name string
@@ -497,7 +522,7 @@ func Test_buildResourceSubQuery(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := BuildResourceSubQuery("signoz_logs", "distributed_logs_v2_resource", tt.args.bucketStart, tt.args.bucketEnd, tt.args.fs, tt.args.groupBy, tt.args.aggregateAttribute, false)
+			got, err := BuildResourceSubQuery("signoz_logs", "distributed_logs_v2_resource", tt.args.bucketStart, tt.args.bucketEnd, tt.args.fs, tt.args.groupBy, tt.args.aggregateAttribute, false, tt.args.isEscaped)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("buildResourceSubQuery() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -1,4 +1,4 @@
-package model
+package smart

 type SpanForTraceDetails struct {
 	TimeUnixNano uint64 `json:"timestamp"`
@@ -15,8 +15,3 @@ type SpanForTraceDetails struct {
 	HasError bool                   `json:"hasError"`
 	Children []*SpanForTraceDetails `json:"children"`
 }
-
-type GetSpansSubQueryDBResponse struct {
-	SpanID  string `ch:"spanID"`
-	TraceID string `ch:"traceID"`
-}
@@ -1,17 +1,16 @@
-package db
+package smart

 import (
 	"errors"
 	"strconv"

-	"github.com/SigNoz/signoz/ee/query-service/model"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
 	"go.uber.org/zap"
 )

 // SmartTraceAlgorithm is an algorithm to find the target span and build a tree of spans around it with the given levelUp and levelDown parameters and the given spanLimit
 func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]basemodel.SearchSpansResult, error) {
-	var spans []*model.SpanForTraceDetails
+	var spans []*SpanForTraceDetails

 	// if targetSpanId is null or not present then randomly select a span as targetSpanId
 	if (targetSpanId == "" || targetSpanId == "null") && len(payload) > 0 {
@@ -24,7 +23,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 		if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
 			parentID = spanItem.References[0].SpanId
 		}
-		span := &model.SpanForTraceDetails{
+		span := &SpanForTraceDetails{
 			TimeUnixNano: spanItem.TimeUnixNano,
 			SpanID:       spanItem.SpanID,
 			TraceID:      spanItem.TraceID,
@@ -45,7 +44,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 	if err != nil {
 		return nil, err
 	}
-	targetSpan := &model.SpanForTraceDetails{}
+	targetSpan := &SpanForTraceDetails{}

 	// Find the target span in the span trees
 	for _, root := range roots {
@@ -65,7 +64,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 	}

 	// Build the final result
-	parents := []*model.SpanForTraceDetails{}
+	parents := []*SpanForTraceDetails{}

 	// Get the parent spans of the target span up to the given levelUp parameter and spanLimit
 	preParent := targetSpan
@@ -90,11 +89,11 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 	}

 	// Get the child spans of the target span until the given levelDown and spanLimit
-	preParents := []*model.SpanForTraceDetails{targetSpan}
-	children := []*model.SpanForTraceDetails{}
+	preParents := []*SpanForTraceDetails{targetSpan}
+	children := []*SpanForTraceDetails{}

 	for i := 0; i < levelDown && len(preParents) != 0 && spanLimit > 0; i++ {
-		parents := []*model.SpanForTraceDetails{}
+		parents := []*SpanForTraceDetails{}
 		for _, parent := range preParents {
 			if spanLimit-len(parent.Children) <= 0 {
 				children = append(children, parent.Children[:spanLimit]...)
@@ -108,7 +107,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 	}

 	// Store the final list of spans in the resultSpanSet map to avoid duplicates
-	resultSpansSet := make(map[*model.SpanForTraceDetails]struct{})
+	resultSpansSet := make(map[*SpanForTraceDetails]struct{})
 	resultSpansSet[targetSpan] = struct{}{}
 	for _, parent := range parents {
 		resultSpansSet[parent] = struct{}{}
@@ -169,12 +168,12 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 }

 // buildSpanTrees builds trees of spans from a list of spans.
-func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTraceDetails, error) {
+func buildSpanTrees(spansPtr *[]*SpanForTraceDetails) ([]*SpanForTraceDetails, error) {

 	// Build a map of spanID to span for fast lookup
-	var roots []*model.SpanForTraceDetails
+	var roots []*SpanForTraceDetails
 	spans := *spansPtr
-	mapOfSpans := make(map[string]*model.SpanForTraceDetails, len(spans))
+	mapOfSpans := make(map[string]*SpanForTraceDetails, len(spans))

 	for _, span := range spans {
 		if span.ParentID == "" {
@@ -206,8 +205,8 @@ func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTra
 }

 // breadthFirstSearch performs a breadth-first search on the span tree to find the target span.
-func breadthFirstSearch(spansPtr *model.SpanForTraceDetails, targetId string) (*model.SpanForTraceDetails, error) {
-	queue := []*model.SpanForTraceDetails{spansPtr}
+func breadthFirstSearch(spansPtr *SpanForTraceDetails, targetId string) (*SpanForTraceDetails, error) {
+	queue := []*SpanForTraceDetails{spansPtr}
 	visited := make(map[string]bool)

 	for len(queue) > 0 {
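Usage of the relocated algorithm stays signature-compatible; only the package changes. A hedged sketch follows: the import path for the new smart package is assumed from this diff, not verified against the final module layout.

package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/query-service/app/traces/smart" // assumed path
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
)

func main() {
	var spans []basemodel.SearchSpanResponseItem // unmarshaled trace spans

	// Keep 2 levels of ancestors and descendants around the target span,
	// capped at 100 rendered spans.
	result, err := smart.SmartTraceAlgorithm(spans, "target-span-id", 2, 2, 100)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(result))
}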
@@ -162,7 +162,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
 			}
 		}
 		if val != nil {
-			fmtVal = utils.ClickHouseFormattedValue(val)
+			fmtVal = utils.ClickHouseFormattedValue(val, false)
 		}
 		if operator, ok := tracesOperatorMappingV3[item.Operator]; ok {
 			switch item.Operator {
@@ -459,7 +459,7 @@ func Having(items []v3.Having) string {
 	// aggregate something and filter on that aggregate
 	var having []string
 	for _, item := range items {
-		having = append(having, fmt.Sprintf("value %s %s", item.Operator, utils.ClickHouseFormattedValue(item.Value)))
+		having = append(having, fmt.Sprintf("value %s %s", item.Operator, utils.ClickHouseFormattedValue(item.Value, false)))
 	}
 	return strings.Join(having, " AND ")
 }
@@ -87,7 +87,7 @@ func existsSubQueryForFixedColumn(key v3.AttributeKey, op v3.FilterOperator) (st
 	}
 }

-func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
+func buildTracesFilterQuery(fs *v3.FilterSet, isEscaped bool) (string, error) {
 	var conditions []string

 	if fs != nil && len(fs.Items) != 0 {
@@ -111,13 +111,21 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
 			}
 		}
 		if val != nil {
-			fmtVal = utils.ClickHouseFormattedValue(val)
+			fmtVal = utils.ClickHouseFormattedValue(val, isEscaped)
 		}
 		if operator, ok := tracesOperatorMappingV3[item.Operator]; ok {
 			switch item.Operator {
 			case v3.FilterOperatorContains, v3.FilterOperatorNotContains:
-				// we also want to treat %, _ as literals for contains
-				val := utils.QuoteEscapedStringForContains(fmt.Sprintf("%s", item.Value), false)
+				var val string
+				if !isEscaped {
+					val = utils.QuoteEscapedString(fmt.Sprintf("%s", item.Value))
+				} else {
+					val = fmt.Sprintf("%s", item.Value)
+				}
+
+				// we want to treat %, _ as literals for contains
+				val = utils.EscapedStringForContains(val, false)
 				conditions = append(conditions, fmt.Sprintf("%s %s '%%%s%%'", columnName, operator, val))
 			case v3.FilterOperatorRegex, v3.FilterOperatorNotRegex:
 				conditions = append(conditions, fmt.Sprintf(operator, columnName, fmtVal))
@@ -148,7 +156,7 @@ func buildTracesFilterQuery(fs *v3.FilterSet) (string, error) {
 	return queryString, nil
 }

-func handleEmptyValuesInGroupBy(groupBy []v3.AttributeKey) (string, error) {
+func handleEmptyValuesInGroupBy(groupBy []v3.AttributeKey, isEscaped bool) (string, error) {
 	// TODO(nitya): in future when we support user based mat column handle them
 	// skipping now as we don't support creating them
 	filterItems := []v3.FilterItem{}
@@ -167,7 +175,7 @@ func handleEmptyValuesInGroupBy(groupBy []v3.AttributeKey) (string, error) {
 			Operator: "AND",
 			Items:    filterItems,
 		}
-		return buildTracesFilterQuery(&filterSet)
+		return buildTracesFilterQuery(&filterSet, isEscaped)
 	}
 	return "", nil
 }
@@ -248,7 +256,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.

 	timeFilter := fmt.Sprintf("(timestamp >= '%d' AND timestamp <= '%d') AND (ts_bucket_start >= %d AND ts_bucket_start <= %d)", tracesStart, tracesEnd, bucketStart, bucketEnd)

-	filterSubQuery, err := buildTracesFilterQuery(mq.Filters)
+	filterSubQuery, err := buildTracesFilterQuery(mq.Filters, options.ValuesEscaped)
 	if err != nil {
 		return "", err
 	}
@@ -256,7 +264,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
 		filterSubQuery = " AND " + filterSubQuery
 	}

-	emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(mq.GroupBy)
+	emptyValuesInGroupByFilter, err := handleEmptyValuesInGroupBy(mq.GroupBy, options.ValuesEscaped)
 	if err != nil {
 		return "", err
 	}
@@ -264,7 +272,7 @@ func buildTracesQuery(start, end, step int64, mq *v3.BuilderQuery, panelType v3.
 		filterSubQuery = filterSubQuery + " AND " + emptyValuesInGroupByFilter
 	}

-	resourceSubQuery, err := resource.BuildResourceSubQuery("signoz_traces", "distributed_traces_v3_resource", bucketStart, bucketEnd, mq.Filters, mq.GroupBy, mq.AggregateAttribute, false)
+	resourceSubQuery, err := resource.BuildResourceSubQuery("signoz_traces", "distributed_traces_v3_resource", bucketStart, bucketEnd, mq.Filters, mq.GroupBy, mq.AggregateAttribute, false, options.ValuesEscaped)
 	if err != nil {
 		return "", err
 	}
@@ -193,7 +193,8 @@ func Test_getSelectLabels(t *testing.T) {

 func Test_buildTracesFilterQuery(t *testing.T) {
 	type args struct {
-		fs *v3.FilterSet
+		fs        *v3.FilterSet
+		isEscaped bool
 	}
 	tests := []struct {
 		name string
@@ -271,10 +272,32 @@ func Test_buildTracesFilterQuery(t *testing.T) {
 			},
 			want: "mapContains(attributes_string, 'host') AND mapContains(attributes_number, 'duration') AND NOT mapContains(attributes_bool, 'isDone') AND NOT mapContains(attributes_string, 'host1') AND `attribute_string_path` = '' AND http_url = '' AND `attribute_string_http$$route` = ''",
 		},
+		{
+			name: "Test with isEscaped contains",
+			args: args{
+				fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
+					{Key: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: `hello\name_`, Operator: v3.FilterOperatorContains},
+				}},
+				isEscaped: true,
+			},
+			want:    `name ILIKE '%hello\name\_%'`,
+			wantErr: false,
+		},
+		{
+			name: "Test with isEscaped eq",
+			args: args{
+				fs: &v3.FilterSet{Operator: "AND", Items: []v3.FilterItem{
+					{Key: v3.AttributeKey{Key: "name", DataType: v3.AttributeKeyDataTypeString, Type: v3.AttributeKeyTypeTag}, Value: `hello\name_`, Operator: v3.FilterOperatorEqual},
+				}},
+				isEscaped: true,
+			},
+			want:    `name = 'hello\name_'`,
+			wantErr: false,
+		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := buildTracesFilterQuery(tt.args.fs)
+			got, err := buildTracesFilterQuery(tt.args.fs, tt.args.isEscaped)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("buildTracesFilterQuery() error = %v, wantErr %v", err, tt.wantErr)
 				return
@@ -315,7 +338,7 @@ func Test_handleEmptyValuesInGroupBy(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := handleEmptyValuesInGroupBy(tt.args.groupBy)
+			got, err := handleEmptyValuesInGroupBy(tt.args.groupBy, false)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("handleEmptyValuesInGroupBy() error = %v, wantErr %v", err, tt.wantErr)
 				return
pkg/query-service/featureManager/manager.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package featureManager

import (
	"github.com/SigNoz/signoz/pkg/query-service/constants"
	"github.com/SigNoz/signoz/pkg/query-service/model"
	"go.uber.org/zap"
)

type FeatureManager struct {
}

func StartManager() *FeatureManager {
	fM := &FeatureManager{}
	return fM
}

// CheckFeature will be internally used by backend routines
// for feature gating
func (fm *FeatureManager) CheckFeature(featureKey string) error {

	feature, err := fm.GetFeatureFlag(featureKey)
	if err != nil {
		return err
	}

	if feature.Active {
		return nil
	}

	return model.ErrFeatureUnavailable{Key: featureKey}
}

// GetFeatureFlags returns current features
func (fm *FeatureManager) GetFeatureFlags() (model.FeatureSet, error) {
	features := constants.DEFAULT_FEATURE_SET
	return features, nil
}

func (fm *FeatureManager) InitFeatures(req model.FeatureSet) error {
	zap.L().Error("InitFeatures not implemented in OSS")
	return nil
}

func (fm *FeatureManager) UpdateFeatureFlag(req model.Feature) error {
	zap.L().Error("UpdateFeatureFlag not implemented in OSS")
	return nil
}

func (fm *FeatureManager) GetFeatureFlag(key string) (model.Feature, error) {
	features, err := fm.GetFeatureFlags()
	if err != nil {
		return model.Feature{}, err
	}
	for _, feature := range features {
		if feature.Name == key {
			return feature, nil
		}
	}
	return model.Feature{}, model.ErrFeatureUnavailable{Key: key}
}
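The OSS FeatureManager above always serves constants.DEFAULT_FEATURE_SET, so gating reduces to an Active check. A minimal usage sketch (the feature key is a placeholder, not taken from this diff):

package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/query-service/featureManager"
)

func main() {
	fm := featureManager.StartManager()

	// "SOME_FEATURE" is a hypothetical key; real keys come from
	// constants.DEFAULT_FEATURE_SET.
	if err := fm.CheckFeature("SOME_FEATURE"); err != nil {
		// Inactive and unknown features both surface as
		// model.ErrFeatureUnavailable{Key: "SOME_FEATURE"}.
		fmt.Printf("feature gated: %v\n", err)
		return
	}
	fmt.Println("feature active, continuing")
}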
13 pkg/query-service/interfaces/featureLookup.go Normal file
@@ -0,0 +1,13 @@
package interfaces

import (
	"github.com/SigNoz/signoz/pkg/query-service/model"
)

type FeatureLookup interface {
	CheckFeature(f string) error
	GetFeatureFlags() (model.FeatureSet, error)
	GetFeatureFlag(f string) (model.Feature, error)
	UpdateFeatureFlag(features model.Feature) error
	InitFeatures(features model.FeatureSet) error
}
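The interface mirrors FeatureManager's method set exactly, so a compile-time assertion documents the contract. This is an illustrative sketch, not code from the diff:

package featureManager

import "github.com/SigNoz/signoz/pkg/query-service/interfaces"

// Compile-time assertion (illustrative): *FeatureManager must provide
// CheckFeature, GetFeatureFlags, GetFeatureFlag, UpdateFeatureFlag and
// InitFeatures, or this declaration fails to build.
var _ interfaces.FeatureLookup = (*FeatureManager)(nil)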
@@ -39,6 +39,7 @@ type Reader interface {
	GetNextPrevErrorIDs(ctx context.Context, params *model.GetErrorParams) (*model.NextPrevErrorIDs, *model.ApiError)

	// Search Interfaces
	SearchTraces(ctx context.Context, params *model.SearchTracesParams) (*[]model.SearchSpansResult, error)
	GetWaterfallSpansForTraceWithMetadata(ctx context.Context, traceID string, req *model.GetWaterfallSpansForTraceWithMetadataParams) (*model.GetWaterfallSpansForTraceWithMetadataResponse, *model.ApiError)
	GetFlamegraphSpansForTrace(ctx context.Context, traceID string, req *model.GetFlamegraphSpansForTraceParams) (*model.GetFlamegraphSpansForTraceResponse, *model.ApiError)
@@ -420,6 +420,7 @@ type FilterAttributeValueResponse struct {
}

type QueryRangeParamsV3 struct {
+	ValuesEscaped bool  `json:"valuesEscaped"`
	Start         int64 `json:"start"`
	End           int64 `json:"end"`
	Step          int64 `json:"step"` // step is in seconds; used for prometheus queries

@@ -1475,4 +1476,5 @@ type URLShareableOptions struct {

type QBOptions struct {
	GraphLimitQtype string
	IsLivetailQuery bool
+	ValuesEscaped   bool
}
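ValuesEscaped is the request-level switch for the new escaping behavior; a hedged sketch of a caller setting it (only fields visible in these hunks are used, the model package is assumed imported as v3, and the time units are assumptions):

params := v3.QueryRangeParamsV3{
	ValuesEscaped: true,          // filter values are already escaped; format them verbatim
	Start:         1617979338000, // range start epoch (unit assumed)
	End:           1617979348000, // range end epoch (unit assumed)
	Step:          60,            // seconds, per the struct comment
}
_ = params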
@@ -28,7 +28,7 @@ func BuildFilterConditions(fs *v3.FilterSet, skipKey string) ([]string, error) {
		if op == v3.FilterOperatorContains || op == v3.FilterOperatorNotContains {
			toFormat = fmt.Sprintf("%%%s%%", toFormat)
		}
-		fmtVal := ClickHouseFormattedValue(toFormat)
+		fmtVal := ClickHouseFormattedValue(toFormat, false)

		// Determine if the key is a JSON key or a normal column
		isJSONKey := false
@@ -159,10 +159,7 @@ func QuoteEscapedString(str string) string {
	return str
}

-func QuoteEscapedStringForContains(str string, isIndex bool) string {
-	// https://clickhouse.com/docs/en/sql-reference/functions/string-search-functions#like
-	str = QuoteEscapedString(str)
+func EscapedStringForContains(str string, isIndex bool) string {
	// we are adding this because if a string contains quote `"` it will be stored as \" in clickhouse
	// to query that using like our query should be \\\\"
	if isIndex {
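The rename to EscapedStringForContains sits on top of ClickHouse LIKE semantics: % and _ are wildcards, so literal occurrences in a contains-value must be backslash-escaped, which is why the earlier test expects `hello\name_` to become `hello\name\_`. A hypothetical helper showing just that core rule (name and package are assumptions, not from the diff):

package utils

import "strings"

// escapeLikeWildcards is illustrative only: it escapes the LIKE
// metacharacters % and _. Quote and backslash handling is layered on
// separately, depending on whether the value arrives pre-escaped.
func escapeLikeWildcards(s string) string {
	s = strings.ReplaceAll(s, `%`, `\%`)
	s = strings.ReplaceAll(s, `_`, `\_`)
	return s
}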
@@ -177,7 +174,7 @@ func QuoteEscapedStringForContains(str string, isIndex bool) string {
}

// ClickHouseFormattedValue formats the value to be used in clickhouse query
-func ClickHouseFormattedValue(v interface{}) string {
+func ClickHouseFormattedValue(v interface{}, isEscaped bool) string {
	// if it's pointer convert it to a value
	v = getPointerValue(v)
@@ -187,7 +184,11 @@ func ClickHouseFormattedValue(v interface{}) string {
	case float32, float64:
		return fmt.Sprintf("%f", x)
	case string:
-		return fmt.Sprintf("'%s'", QuoteEscapedString(x))
+		if !isEscaped {
+			return fmt.Sprintf("'%s'", QuoteEscapedString(x))
+		} else {
+			return fmt.Sprintf("'%s'", x)
+		}
	case bool:
		return fmt.Sprintf("%v", x)
@@ -199,7 +200,11 @@ func ClickHouseFormattedValue(v interface{}) string {
	case string:
		str := "["
		for idx, sVal := range x {
-			str += fmt.Sprintf("'%s'", QuoteEscapedString(sVal.(string)))
+			if !isEscaped {
+				str += fmt.Sprintf("'%s'", QuoteEscapedString(sVal.(string)))
+			} else {
+				str += fmt.Sprintf("'%s'", sVal.(string))
+			}
			if idx != len(x)-1 {
				str += ","
			}
@@ -218,7 +223,11 @@ func ClickHouseFormattedValue(v interface{}) string {
	}
	str := "["
	for idx, sVal := range x {
-		str += fmt.Sprintf("'%s'", QuoteEscapedString(sVal))
+		if !isEscaped {
+			str += fmt.Sprintf("'%s'", QuoteEscapedString(sVal))
+		} else {
+			str += fmt.Sprintf("'%s'", sVal)
+		}
		if idx != len(x)-1 {
			str += ","
		}
@@ -234,13 +243,13 @@ func ClickHouseFormattedValue(v interface{}) string {
func ClickHouseFormattedMetricNames(v interface{}) string {
	if name, ok := v.(string); ok {
		if newName, ok := metrics.MetricsUnderTransition[name]; ok {
-			return ClickHouseFormattedValue([]interface{}{name, newName})
+			return ClickHouseFormattedValue([]interface{}{name, newName}, false)
		} else {
-			return ClickHouseFormattedValue([]interface{}{name})
+			return ClickHouseFormattedValue([]interface{}{name}, false)
		}
	}

-	return ClickHouseFormattedValue(v)
+	return ClickHouseFormattedValue(v, false)
}
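Every internal caller passes isEscaped=false, preserving the old behavior; only the new user-facing path opts in. The two modes side by side, with inputs taken from the test data below (assumed to run in the same package as ClickHouseFormattedValue, imports elided):

a := ClickHouseFormattedValue("test'1", false) // -> 'test\'1' (quote escaped on the way in)
b := ClickHouseFormattedValue(`test\'1`, true) // -> 'test\'1' (value trusted verbatim)
fmt.Println(a, b)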

func AddBackTickToFormatTag(str string) string {
@@ -317,9 +317,10 @@ var oneString = "1"
var trueBool = true

var testClickHouseFormattedValueData = []struct {
-	name  string
-	value interface{}
-	want  interface{}
+	name      string
+	value     interface{}
+	want      interface{}
+	isEscaped bool
}{
	{
		name: "int",
@@ -394,12 +395,21 @@ var testClickHouseFormattedValueData = []struct {
		},
		want: "['test\\'1','test\\'2']",
	},
+	{
+		name: "[]interface{} with string with single quote escaped",
+		value: []interface{}{
+			`test\\'1`,
+			`test\\'2`,
+		},
+		isEscaped: true,
+		want:      `['test\\'1','test\\'2']`,
+	},
}

func TestClickHouseFormattedValue(t *testing.T) {
	for _, tt := range testClickHouseFormattedValueData {
		t.Run(tt.name, func(t *testing.T) {
-			got := ClickHouseFormattedValue(tt.value)
+			got := ClickHouseFormattedValue(tt.value, tt.isEscaped)
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("ClickHouseFormattedValue() = %v, want %v", got, tt.want)
			}
@@ -51,6 +51,7 @@ func NewTestSqliteDB(t *testing.T) (sqlStore sqlstore.SQLStore, testDBFilePath s
			sqlmigration.NewUpdateDashboardAndSavedViewsFactory(sqlStore),
			sqlmigration.NewUpdatePatAndOrgDomainsFactory(sqlStore),
			sqlmigration.NewUpdatePipelines(sqlStore),
+			sqlmigration.NewAddVirtualFieldsFactory(),
		),
	)
	if err != nil {
@@ -69,6 +69,7 @@ func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedM
		sqlmigration.NewUpdatePreferencesFactory(sqlstore),
		sqlmigration.NewUpdateApdexTtlFactory(sqlstore),
		sqlmigration.NewUpdateResetPasswordFactory(sqlstore),
+		sqlmigration.NewAddVirtualFieldsFactory(),
	)
}
@@ -6,7 +6,6 @@ import (
	"github.com/SigNoz/signoz/pkg/alertmanager"
	"github.com/SigNoz/signoz/pkg/cache"
	"github.com/SigNoz/signoz/pkg/factory"
-	"github.com/SigNoz/signoz/pkg/featurecontrol"
	"github.com/SigNoz/signoz/pkg/instrumentation"
	"github.com/SigNoz/signoz/pkg/prometheus"
	"github.com/SigNoz/signoz/pkg/sqlmigration"

@@ -14,7 +13,6 @@ import (
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/version"
	"github.com/SigNoz/signoz/pkg/zeus"

	"github.com/SigNoz/signoz/pkg/web"
)

@@ -27,8 +25,6 @@ type SigNoz struct {
	TelemetryStore telemetrystore.TelemetryStore
	Prometheus     prometheus.Prometheus
	Alertmanager   alertmanager.Alertmanager
	Zeus           zeus.Zeus
-	FeatureControl featurecontrol.FeatureControl
}

func New(
@@ -66,7 +66,7 @@ func (migration *updatePatAndOrgDomains) Up(ctx context.Context, db *bun.DB) err
		}
	}

-	if err := updateOrgId(ctx, tx); err != nil {
+	if err := updateOrgId(ctx, tx, "org_domains"); err != nil {
		return err
	}
@@ -96,7 +96,7 @@ func (migration *updatePatAndOrgDomains) Down(ctx context.Context, db *bun.DB) e
	return nil
}

-func updateOrgId(ctx context.Context, tx bun.Tx) error {
+func updateOrgId(ctx context.Context, tx bun.Tx, table string) error {
	if _, err := tx.NewCreateTable().
		Model(&struct {
			bun.BaseModel `bun:"table:org_domains_new"`
58 pkg/sqlmigration/025_add_virtual_fields.go Normal file
@@ -0,0 +1,58 @@
package sqlmigration

import (
	"context"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

type addVirtualFields struct{}

func NewAddVirtualFieldsFactory() factory.ProviderFactory[SQLMigration, Config] {
	return factory.NewProviderFactory(factory.MustNewName("add_virtual_fields"), newAddVirtualFields)
}

func newAddVirtualFields(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
	return &addVirtualFields{}, nil
}

func (migration *addVirtualFields) Register(migrations *migrate.Migrations) error {
	if err := migrations.Register(migration.Up, migration.Down); err != nil {
		return err
	}

	return nil
}

func (migration *addVirtualFields) Up(ctx context.Context, db *bun.DB) error {
	// table:virtual_field op:create
	if _, err := db.NewCreateTable().
		Model(&struct {
			bun.BaseModel `bun:"table:virtual_field"`

			types.Identifiable
			types.TimeAuditable
			types.UserAuditable

			Name        string                `bun:"name,type:text,notnull"`
			Expression  string                `bun:"expression,type:text,notnull"`
			Description string                `bun:"description,type:text"`
			Signal      telemetrytypes.Signal `bun:"signal,type:text,notnull"`
			OrgID       string                `bun:"org_id,type:text,notnull"`
		}{}).
		ForeignKey(`("org_id") REFERENCES "organizations" ("id") ON DELETE CASCADE`).
		IfNotExists().
		Exec(ctx); err != nil {
		return err
	}

	return nil
}

func (migration *addVirtualFields) Down(ctx context.Context, db *bun.DB) error {
	return nil
}
275 pkg/telemetrylogs/condition_builder.go Normal file
@@ -0,0 +1,275 @@
package telemetrylogs

import (
	"context"
	"fmt"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"

	"github.com/huandu/go-sqlbuilder"
)

var (
	logsV2Columns = map[string]*schema.Column{
		"ts_bucket_start":      {Name: "ts_bucket_start", Type: schema.ColumnTypeUInt64},
		"resource_fingerprint": {Name: "resource_fingerprint", Type: schema.ColumnTypeString},

		"timestamp":          {Name: "timestamp", Type: schema.ColumnTypeUInt64},
		"observed_timestamp": {Name: "observed_timestamp", Type: schema.ColumnTypeUInt64},
		"id":                 {Name: "id", Type: schema.ColumnTypeString},
		"trace_id":           {Name: "trace_id", Type: schema.ColumnTypeString},
		"span_id":            {Name: "span_id", Type: schema.ColumnTypeString},
		"trace_flags":        {Name: "trace_flags", Type: schema.ColumnTypeUInt32},
		"severity_text":      {Name: "severity_text", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"severity_number":    {Name: "severity_number", Type: schema.ColumnTypeUInt8},
		"body":               {Name: "body", Type: schema.ColumnTypeString},
		"attributes_string": {Name: "attributes_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes_number": {Name: "attributes_number", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}},
		"attributes_bool": {Name: "attributes_bool", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}},
		"resources_string": {Name: "resources_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"scope_name":    {Name: "scope_name", Type: schema.ColumnTypeString},
		"scope_version": {Name: "scope_version", Type: schema.ColumnTypeString},
		"scope_string": {Name: "scope_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
	}
)

var _ qbtypes.ConditionBuilder = &conditionBuilder{}

type conditionBuilder struct {
}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {

	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return logsV2Columns["resources_string"], nil
	case telemetrytypes.FieldContextScope:
		switch key.Name {
		case "name", "scope.name", "scope_name":
			return logsV2Columns["scope_name"], nil
		case "version", "scope.version", "scope_version":
			return logsV2Columns["scope_version"], nil
		}
		return logsV2Columns["scope_string"], nil
	case telemetrytypes.FieldContextAttribute:
		switch key.FieldDataType {
		case telemetrytypes.FieldDataTypeString:
			return logsV2Columns["attributes_string"], nil
		case telemetrytypes.FieldDataTypeInt64, telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeNumber:
			return logsV2Columns["attributes_number"], nil
		case telemetrytypes.FieldDataTypeBool:
			return logsV2Columns["attributes_bool"], nil
		}
	case telemetrytypes.FieldContextLog:
		col, ok := logsV2Columns[key.Name]
		if !ok {
			return nil, qbtypes.ErrColumnNotFound
		}
		return col, nil
	}

	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.ColumnTypeString,
		schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		schema.ColumnTypeUInt64,
		schema.ColumnTypeUInt32,
		schema.ColumnTypeUInt8:
		return column.Name, nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}:
		// a key could have been materialized, if so return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeFloat64,
	}:
		// a key could have been materialized, if so return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeBool,
	}:
		// a key could have been materialized, if so return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	// should not reach here
	return column.Name, nil
}
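Plain columns resolve to their own names, while map-backed attributes become map subscripts (or a materialized column name when key.Materialized is set). A quick sketch against the concrete builder, mirroring the tests further down (imports and surrounding function assumed):

ctx := context.Background()
cb := &conditionBuilder{}

// String attribute -> subscript on the attributes_string map column.
name, _ := cb.GetTableFieldName(ctx, &telemetrytypes.TelemetryFieldKey{
	Name:          "user.id",
	FieldContext:  telemetrytypes.FieldContextAttribute,
	FieldDataType: telemetrytypes.FieldDataTypeString,
})
fmt.Println(name) // attributes_string['user.id']

// Top-level log column -> returned as-is.
name, _ = cb.GetTableFieldName(ctx, &telemetrytypes.TelemetryFieldKey{
	Name:         "timestamp",
	FieldContext: telemetrytypes.FieldContextLog,
})
fmt.Println(name) // timestamp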

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName)

	// regular operators
	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.E(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.NE(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThan:
		return sb.G(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThanOrEq:
		return sb.GE(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThan:
		return sb.LT(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThanOrEq:
		return sb.LE(tblFieldName, value), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.Like(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.NotLike(tblFieldName, value), nil
	case qbtypes.FilterOperatorILike:
		return sb.ILike(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.NotILike(tblFieldName, value), nil

	case qbtypes.FilterOperatorContains:
		return sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil

	// between and not between
	case qbtypes.FilterOperatorBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.Between(tblFieldName, values[0], values[1]), nil
	case qbtypes.FilterOperatorNotBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.NotBetween(tblFieldName, values[0], values[1]), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.In(tblFieldName, values...), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.NotIn(tblFieldName, values...), nil

	// exists and not exists
	// but how could you live and have no story to tell
	// in the UI based query builder, `exists` and `not exists` are used for
	// key membership checks, so depending on the column type, the condition changes
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		var value any
		switch column.Type {
		case schema.ColumnTypeString, schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}:
			value = ""
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.ColumnTypeUInt64, schema.ColumnTypeUInt32, schema.ColumnTypeUInt8:
			value = 0
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if key.Materialized {
				leftOperand = telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
			}
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			} else {
				return sb.NE(leftOperand, true), nil
			}
		default:
			return "", fmt.Errorf("exists operator is not supported for column type %s", column.Type)
		}
	}
	return "", fmt.Errorf("unsupported operator: %v", operator)
}
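End to end, GetCondition plugs into go-sqlbuilder's parameterized builder; a minimal sketch of the flow the tests below exercise (the table name is an assumption, not from this diff; imports and surrounding function assumed):

ctx := context.Background()
cb := NewConditionBuilder()
sb := sqlbuilder.NewSelectBuilder()
sb.Select("body").From("logs_v2") // table name assumed for illustration

cond, err := cb.GetCondition(ctx, &telemetrytypes.TelemetryFieldKey{
	Name:          "request.duration",
	FieldContext:  telemetrytypes.FieldContextAttribute,
	FieldDataType: telemetrytypes.FieldDataTypeNumber,
}, qbtypes.FilterOperatorGreaterThan, float64(100), sb)
if err != nil {
	panic(err)
}
sb.Where(cond)

sql, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
fmt.Println(sql)  // ... WHERE attributes_number['request.duration'] > ?
fmt.Println(args) // [100]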
620 pkg/telemetrylogs/condition_builder_test.go Normal file
@@ -0,0 +1,620 @@
package telemetrylogs

import (
	"context"
	"testing"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetColumn(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		expectedCol   *schema.Column
		expectedError error
	}{
		{
			name: "Resource field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedCol:   logsV2Columns["resources_string"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope.name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope.name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope_name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope_name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - version",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "version",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_version"],
			expectedError: nil,
		},
		{
			name: "Scope field - other scope field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "custom.scope.field",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_string"],
			expectedError: nil,
		},
		{
			name: "Attribute field - string type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedCol:   logsV2Columns["attributes_string"],
			expectedError: nil,
		},
		{
			name: "Attribute field - number type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - int64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeInt64,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - float64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "cpu.utilization",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - bool type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedCol:   logsV2Columns["attributes_bool"],
			expectedError: nil,
		},
		{
			name: "Log field - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   logsV2Columns["timestamp"],
			expectedError: nil,
		},
		{
			name: "Log field - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   logsV2Columns["body"],
			expectedError: nil,
		},
		{
			name: "Log field - nonexistent",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "did_user_login",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "did_user_login",
				Signal:        telemetrytypes.SignalLogs,
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedCol:   logsV2Columns["attributes_bool"],
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			col, err := conditionBuilder.GetColumn(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedCol, col)
			}
		})
	}
}

func TestGetFieldKeyName(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := &conditionBuilder{}

	testCases := []struct {
		name           string
		key            telemetrytypes.TelemetryFieldKey
		expectedResult string
		expectedError  error
	}{
		{
			name: "Simple column type - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "timestamp",
			expectedError:  nil,
		},
		{
			name: "Map column type - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedResult: "attributes_string['user.id']",
			expectedError:  nil,
		},
		{
			name: "Map column type - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedResult: "attributes_number['request.size']",
			expectedError:  nil,
		},
		{
			name: "Map column type - bool attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedResult: "attributes_bool['request.success']",
			expectedError:  nil,
		},
		{
			name: "Map column type - resource attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedResult: "resources_string['service.name']",
			expectedError:  nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "",
			expectedError:  qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedResult, result)
			}
		})
	}
}

func TestGetCondition(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Equal operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "error message",
			expectedSQL:   "body = ?",
			expectedError: nil,
		},
		{
			name: "Not Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotEqual,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <> ?",
			expectedError: nil,
		},
		{
			name: "Greater Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorGreaterThan,
			value:         float64(100),
			expectedSQL:   "attributes_number['request.duration'] > ?",
			expectedError: nil,
		},
		{
			name: "Less Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorLessThan,
			value:         float64(1024),
			expectedSQL:   "attributes_number['request.size'] < ?",
			expectedError: nil,
		},
		{
			name: "Greater Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorGreaterThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp >= ?",
			expectedError: nil,
		},
		{
			name: "Less Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorLessThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <= ?",
			expectedError: nil,
		},
		{
			name: "Like operator - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorLike,
			value:         "%error%",
			expectedSQL:   "body LIKE ?",
			expectedError: nil,
		},
		{
			name: "Not Like operator - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotLike,
			value:         "%error%",
			expectedSQL:   "body NOT LIKE ?",
			expectedError: nil,
		},
		{
			name: "ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Not ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) NOT LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Contains operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorContains,
			value:         "admin",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "Between operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         "invalid",
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Between operator - insufficient values",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000)},
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Not Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp NOT BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "In operator - severity_text",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         []any{"error", "fatal", "critical"},
			expectedSQL:   "severity_text IN (?, ?, ?)",
			expectedError: nil,
		},
		{
			name: "In operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         "error",
			expectedSQL:   "",
			expectedError: qbtypes.ErrInValues,
		},
		{
			name: "Not In operator - severity_text",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotIn,
			value:         []any{"debug", "info", "trace"},
			expectedSQL:   "severity_text NOT IN (?, ?, ?)",
			expectedError: nil,
		},
		{
			name: "Exists operator - string field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "body <> ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - string field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "body = ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - number field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "timestamp <> ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') = ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') <> ?",
			expectedError: nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "value",
			expectedSQL:   "",
			expectedError: qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}

func TestGetConditionMultiple(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		keys          []*telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Equal operator - string",
			keys: []*telemetrytypes.TelemetryFieldKey{
				{
					Name:         "body",
					FieldContext: telemetrytypes.FieldContextLog,
				},
				{
					Name:         "severity_text",
					FieldContext: telemetrytypes.FieldContextLog,
				},
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "error message",
			expectedSQL:   "body = ? AND severity_text = ?",
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			var err error
			for _, key := range tc.keys {
				cond, err := conditionBuilder.GetCondition(ctx, key, tc.operator, tc.value, sb)
				sb.Where(cond)
				if err != nil {
					t.Fatalf("Error getting condition for key %s: %v", key.Name, err)
				}
			}

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
Some files were not shown because too many files have changed in this diff.