Mirror of https://github.com/SigNoz/signoz.git (synced 2026-02-11 03:54:28 +00:00)
Compare commits: licensing...v0.77.2-in (52 commits)
| Author | SHA1 | Date |
|---|---|---|
| | c3811d83c6 | |
| | b44720a2dd | |
| | 85eb759ae5 | |
| | b80a68f265 | |
| | c0f01e4cb9 | |
| | fed84cb50a | |
| | 80545c4d07 | |
| | 0b1faec092 | |
| | 9ef9e522a1 | |
| | c57268030e | |
| | 62c315da55 | |
| | 41b1c72309 | |
| | ba6f31b1c3 | |
| | eed92978a4 | |
| | 41cbd316b5 | |
| | 91925bc9a0 | |
| | 8d7d33393d | |
| | 5217771af2 | |
| | 28a01876b3 | |
| | 760fb76b8d | |
| | 8d143b44b1 | |
| | 423aebd6eb | |
| | 8d630707af | |
| | a5b52431b7 | |
| | 0138d757c8 | |
| | a881a2b0e0 | |
| | 844195b84f | |
| | 8ff05b2e8f | |
| | 205d9e45e3 | |
| | f15bf15826 | |
| | c8c56c544e | |
| | 1c43655336 | |
| | c269c8c6b8 | |
| | 8b42a3d64d | |
| | f34ebc2bc7 | |
| | 3142b6cc6d | |
| | 58e141685a | |
| | e17f63a50c | |
| | 838ef5dcc5 | |
| | 010369f11c | |
| | 52ae3ad8c7 | |
| | 570152d497 | |
| | 03d9460671 | |
| | 68731f9b25 | |
| | e53d3d1269 | |
| | 0dd76eca8a | |
| | 893bd20a2d | |
| | 176bcbf466 | |
| | 9e498ddcc0 | |
| | 666a1fd0c7 | |
| | 8bc2f3e5b7 | |
| | 33458514d3 | |
.github/workflows/build-community.yaml (vendored, new file, 81 lines)
@@ -0,0 +1,81 @@
name: build-community

on:
  push:
    tags:
      - 'v[0-9]+.[0-9]+.[0-9]+'
      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    outputs:
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_OUTPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_NAME: signoz-community
      GO_INPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./pkg/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=community
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./pkg/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: dockerhub
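The `-X` linker flags above overwrite package-level string variables at link time, which is how the binary learns its version, commit hash, and build time without any code generation. A minimal sketch of what a `pkg/version` package consuming these flags could look like — the variable names come from the flags above, while the defaults and the `Info` helper are assumptions for illustration:

```go
// Package version holds build metadata injected at link time via
// -ldflags "-X github.com/SigNoz/signoz/pkg/version.<var>=<value>".
package version

import "fmt"

// Defaults apply to plain `go build` runs where no -X flags are passed.
var (
	version = "dev"
	variant = "community"
	hash    = "unknown"
	time    = "unknown"
	branch  = "unknown"
)

// Info renders the injected metadata in a human-readable form.
func Info() string {
	return fmt.Sprintf("%s (%s) commit=%s built=%s branch=%s",
		version, variant, hash, time, branch)
}
```

Any variable left untouched by `-X` keeps its default, which is why `dev`-style fallbacks are a common pattern for local builds.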
.github/workflows/build-enterprise.yaml (vendored, new file, 113 lines)
@@ -0,0 +1,113 @@
name: build-enterprise

on:
  push:
    tags:
      - v*

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    outputs:
      docker_providers: ${{ steps.set-docker-providers.outputs.providers }}
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT
      - name: set-docker-providers
        id: set-docker-providers
        run: |
          if [[ ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+$ || ${{ github.event.ref }} =~ ^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
            echo "providers=dockerhub gcp" >> $GITHUB_OUTPUT
          else
            echo "providers=gcp" >> $GITHUB_OUTPUT
          fi
      - name: create-dotenv
        run: |
          mkdir -p frontend
          echo 'CI=1' > frontend/.env
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
      - name: cache-dotenv
        uses: actions/cache@v4
        with:
          path: frontend/.env
          key: dotenv-${{ github.sha }}
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_INPUT_ARTIFACT_CACHE_KEY: dotenv-${{ github.sha }}
      JS_INPUT_ARTIFACT_PATH: frontend/.env
      JS_OUTPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_INPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./ee/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: ${{ needs.prepare.outputs.docker_providers }}
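The `set-docker-providers` step publishes release and release-candidate tags to both registries and everything else only to GCP. The same classification, sketched in Go — the two regexes are folded into one that mirrors the bash above, and the function name is made up for illustration:

```go
package main

import (
	"fmt"
	"regexp"
)

// releaseRef matches refs/tags/vX.Y.Z and refs/tags/vX.Y.Z-rc.N,
// mirroring the two bash regexes in set-docker-providers.
var releaseRef = regexp.MustCompile(`^refs/tags/v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$`)

// dockerProviders returns the registries a given git ref should be pushed to.
func dockerProviders(ref string) string {
	if releaseRef.MatchString(ref) {
		return "dockerhub gcp"
	}
	return "gcp"
}

func main() {
	fmt.Println(dockerProviders("refs/tags/v0.78.1"))      // dockerhub gcp
	fmt.Println(dockerProviders("refs/tags/v0.78.1-rc.1")) // dockerhub gcp
	fmt.Println(dockerProviders("refs/tags/v0.78.1-beta")) // gcp
}
```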
.github/workflows/build-staging.yaml (vendored, new file, 131 lines)
@@ -0,0 +1,131 @@
name: build-staging

on:
  push:
    branches:
      - main
  pull_request:
    types: [labeled]

defaults:
  run:
    shell: bash

env:
  PRIMUS_HOME: .primus
  MAKE: make --no-print-directory --makefile=.primus/src/make/main.mk

jobs:
  prepare:
    runs-on: ubuntu-latest
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    outputs:
      version: ${{ steps.build-info.outputs.version }}
      hash: ${{ steps.build-info.outputs.hash }}
      time: ${{ steps.build-info.outputs.time }}
      branch: ${{ steps.build-info.outputs.branch }}
      deployment: ${{ steps.build-info.outputs.deployment }}
    steps:
      - name: self-checkout
        uses: actions/checkout@v4
      - id: token
        name: github-token-gen
        uses: actions/create-github-app-token@v1
        with:
          app-id: ${{ secrets.PRIMUS_APP_ID }}
          private-key: ${{ secrets.PRIMUS_PRIVATE_KEY }}
          owner: ${{ github.repository_owner }}
      - name: primus-checkout
        uses: actions/checkout@v4
        with:
          repository: signoz/primus
          ref: main
          path: .primus
          token: ${{ steps.token.outputs.token }}
      - name: build-info
        id: build-info
        run: |
          echo "version=$($MAKE info-version)" >> $GITHUB_OUTPUT
          echo "hash=$($MAKE info-commit-short)" >> $GITHUB_OUTPUT
          echo "time=$($MAKE info-timestamp)" >> $GITHUB_OUTPUT
          echo "branch=$($MAKE info-branch)" >> $GITHUB_OUTPUT

          staging_label="${{ github.event.label.name }}"
          if [[ "${staging_label}" == "staging:"* ]]; then
            deployment=${staging_label#"staging:"}
          elif [[ "${{ github.event.ref }}" == "refs/heads/main" ]]; then
            deployment="staging"
          else
            echo "error: not able to determine deployment - please verify the PR label or the branch"
            exit 1
          fi
          echo "deployment=${deployment}" >> $GITHUB_OUTPUT
      - name: create-dotenv
        run: |
          mkdir -p frontend
          echo 'CI=1' > frontend/.env
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' >> frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
      - name: cache-dotenv
        uses: actions/cache@v4
        with:
          path: frontend/.env
          key: dotenv-${{ github.sha }}
  js-build:
    uses: signoz/primus.workflows/.github/workflows/js-build.yaml@main
    needs: prepare
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
      JS_INPUT_ARTIFACT_CACHE_KEY: dotenv-${{ github.sha }}
      JS_INPUT_ARTIFACT_PATH: frontend/.env
      JS_OUTPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      JS_OUTPUT_ARTIFACT_PATH: frontend/build
      DOCKER_BUILD: false
      DOCKER_MANIFEST: false
  go-build:
    uses: signoz/primus.workflows/.github/workflows/go-build.yaml@main
    needs: [prepare, js-build]
    secrets: inherit
    with:
      PRIMUS_REF: main
      GO_INPUT_ARTIFACT_CACHE_KEY: jsbuild-${{ github.sha }}
      GO_INPUT_ARTIFACT_PATH: frontend/build
      GO_BUILD_CONTEXT: ./ee/query-service
      GO_BUILD_FLAGS: >-
        -tags timetzdata
        -ldflags='-linkmode external -extldflags \"-static\" -s -w
        -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
        -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
        -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
        -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
        -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1'
      GO_CGO_ENABLED: 1
      DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
      DOCKER_DOCKERFILE_PATH: ./ee/query-service/Dockerfile.multi-arch
      DOCKER_MANIFEST: true
      DOCKER_PROVIDERS: gcp
  staging:
    if: ${{ contains(github.event.label.name, 'staging:') || github.event.ref == 'refs/heads/main' }}
    uses: signoz/primus.workflows/.github/workflows/github-trigger.yaml@main
    secrets: inherit
    needs: [prepare, go-build]
    with:
      PRIMUS_REF: main
      GITHUB_ENVIRONMENT: staging
      GITHUB_SILENT: true
      GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
      GITHUB_EVENT_NAME: releaser
      GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"
.github/workflows/build.yaml (vendored, deleted, 122 lines)
@@ -1,122 +0,0 @@
name: build

on:
  push:
    branches:
      - main
    tags:
      - v*

jobs:
  enterprise:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: setup
        uses: actions/setup-go@v5
        with:
          go-version: "1.22"
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: latest
      - name: docker-login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: create-env-file
        run: |
          echo 'INTERCOM_APP_ID="${{ secrets.INTERCOM_APP_ID }}"' > frontend/.env
          echo 'SEGMENT_ID="${{ secrets.SEGMENT_ID }}"' >> frontend/.env
          echo 'SENTRY_AUTH_TOKEN="${{ secrets.SENTRY_AUTH_TOKEN }}"' >> frontend/.env
          echo 'SENTRY_ORG="${{ secrets.SENTRY_ORG }}"' >> frontend/.env
          echo 'SENTRY_PROJECT_ID="${{ secrets.SENTRY_PROJECT_ID }}"' >> frontend/.env
          echo 'SENTRY_DSN="${{ secrets.SENTRY_DSN }}"' >> frontend/.env
          echo 'TUNNEL_URL="${{ secrets.TUNNEL_URL }}"' >> frontend/.env
          echo 'TUNNEL_DOMAIN="${{ secrets.TUNNEL_DOMAIN }}"' >> frontend/.env
          echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
          echo 'CUSTOMERIO_ID="${{ secrets.CUSTOMERIO_ID }}"' >> frontend/.env
          echo 'CUSTOMERIO_SITE_ID="${{ secrets.CUSTOMERIO_SITE_ID }}"' >> frontend/.env
      - name: github-ref-info
        shell: bash
        run: |
          GH_REF=${{ github.ref }}
          if [[ "${{ github.ref_type }}" == "tag" ]]; then
            PREFIX="refs/tags/"
            echo "GH_IS_TAG=true" >> $GITHUB_ENV
            echo "GH_TAG=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          else
            PREFIX="refs/heads/"
            echo "GH_IS_TAG=false" >> $GITHUB_ENV
            echo "GH_BRANCH_NAME=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          fi
      - name: set-version
        run: |
          if [ '${{ env.GH_IS_TAG }}' == 'true' ]; then
            echo "VERSION=${{ env.GH_TAG }}" >> $GITHUB_ENV
          elif [ '${{ env.GH_BRANCH_NAME }}' == 'main' ]; then
            echo "VERSION=latest" >> $GITHUB_ENV
          else
            echo "VERSION=${{ env.GH_BRANCH_NAME }}" >> $GITHUB_ENV
          fi
      - name: cross-compilation-tools
        run: |
          set -ex
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: publish
        run: make docker-buildx-enterprise

  community:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: setup-go
        uses: actions/setup-go@v5
        with:
          go-version: "1.22"
      - name: setup-qemu
        uses: docker/setup-qemu-action@v3
      - name: setup-buildx
        uses: docker/setup-buildx-action@v3
        with:
          version: latest
      - name: docker-login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: github-ref-info
        shell: bash
        run: |
          GH_REF=${{ github.ref }}
          if [[ "${{ github.ref_type }}" == "tag" ]]; then
            PREFIX="refs/tags/"
            echo "GH_IS_TAG=true" >> $GITHUB_ENV
            echo "GH_TAG=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          else
            PREFIX="refs/heads/"
            echo "GH_IS_TAG=false" >> $GITHUB_ENV
            echo "GH_BRANCH_NAME=${GH_REF#$PREFIX}" >> $GITHUB_ENV
          fi
      - name: set-version
        run: |
          if [ '${{ env.GH_IS_TAG }}' == 'true' ]; then
            echo "VERSION=${{ env.GH_TAG }}" >> $GITHUB_ENV
          elif [ '${{ env.GH_BRANCH_NAME }}' == 'main' ]; then
            echo "VERSION=latest" >> $GITHUB_ENV
          else
            echo "VERSION=${{ env.GH_BRANCH_NAME }}" >> $GITHUB_ENV
          fi
      - name: cross-compilation-tools
        run: |
          set -ex
          sudo apt-get update
          sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
      - name: publish
        run: make docker-buildx-community
.github/workflows/staging-deployment.yaml (vendored, 13 changes)
@@ -36,12 +36,17 @@ jobs:
           echo "GITHUB_BRANCH: ${GITHUB_BRANCH}"
           echo "GITHUB_SHA: ${GITHUB_SHA}"
           export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
-          export OTELCOL_TAG="main"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
           export KAFKA_SPAN_EVAL="true"
-          docker system prune --force
-          docker pull signoz/signoz-otel-collector:main
-          docker pull signoz/signoz-schema-migrator:main
+          docker system prune --force --all
+          OTELCOL_TAG=$(curl -s https://api.github.com/repos/SigNoz/signoz-otel-collector/releases/latest | jq -r '.tag_name // "not-found"')
+          if [[ "${OTELCOL_TAG}" == "not-found" ]]; then
+            echo "warning: unable to determine latest SigNoz OtelCollector release tag, skipping latest otelcol deployment"
+          else
+            export OTELCOL_TAG=${OTELCOL_TAG}
+            docker pull signoz/signoz-otel-collector:${OTELCOL_TAG}
+            docker pull signoz/signoz-schema-migrator:${OTELCOL_TAG}
+          fi
           cd ~/signoz
           git status
           git add .
.github/workflows/testing-deployment.yaml (vendored, 2 changes)
@@ -38,7 +38,7 @@ jobs:
           export VERSION="${GITHUB_SHA:0:7}" # needed for child process to access it
           export DEV_BUILD="1"
           export PATH="/usr/local/go/bin/:$PATH" # needed for Golang to work
-          docker system prune --force
+          docker system prune --force --all
           cd ~/signoz
           git status
           git add .
.versions/alpine (new file, 17 lines)
@@ -0,0 +1,17 @@
#### Auto generated by make docker-version-alpine. DO NOT EDIT! ####
amd64=029a752048e32e843bd6defe3841186fb8d19a28dae8ec287f433bb9d6d1ad85
unknown=5fea95373b9ec85974843f31446fa6a9df4492dddae4e1cb056193c34a20a5be
arm=b4aef1a899e0271f06d948c9a8fa626ecdb2202d3a178bc14775dd559e23df8e
unknown=a4d1e27e63a9d6353046eb25a2f0ec02945012b217f4364cd83a73fe6dfb0b15
arm=4fdafe217d0922f3c3e2b4f64cf043f8403a4636685cd9c51fea2cbd1f419740
unknown=7f21ac2018d95b2c51a5779c1d5ca6c327504adc3b0fdc747a6725d30b3f13c2
arm64=ea3c5a9671f7b3f7eb47eab06f73bc6591df978b0d5955689a9e6f943aa368c0
unknown=a8ba68c1a9e6eea8041b4b8f996c235163440808b9654a865976fdcbede0f433
386=dea9f02e103e837849f984d5679305c758aba7fea1b95b7766218597f61a05ab
unknown=3c6629bec05c8273a927d46b77428bf4a378dad911a0ae284887becdc149b734
ppc64le=0880443bffa028dfbbc4094a32dd6b7ac25684e4c0a3d50da9e0acae355c5eaf
unknown=bb48308f976b266e3ab39bbf9af84521959bd9c295d3c763690cf41f8df2a626
riscv64=d76e6fbe348ff20c2931bb7f101e49379648e026de95dd37f96e00ce1909dcf7
unknown=dd807544365f6dc187cbe6de0806adce2ea9de3e7124717d1d8e8b7a18b77b64
s390x=b815fadf80495594eb6296a6af0bc647ae5f193e0044e07acec7e5b378c9ce2d
unknown=74681be74a280a88abb53ff1e048eb1fb624b30d0066730df6d8afd02ba82e01
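The file maps a platform architecture to the Alpine image digest the build should pin (the repeated `arm`/`unknown` keys cover the per-variant attestation entries of the multi-arch manifest). A purely illustrative Go sketch of reading such an `arch=digest` file — the build actually consumes it through make, so this parser is an assumption, not SigNoz code:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// loadDigests parses lines of the form "arch=sha256hex", skipping blank
// lines and comments, so a build step can look up its platform's digest.
func loadDigests(path string) (map[string][]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	digests := make(map[string][]string) // arch -> digests (a key may repeat)
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		arch, digest, ok := strings.Cut(line, "=")
		if !ok {
			continue
		}
		digests[arch] = append(digests[arch], digest)
	}
	return digests, sc.Err()
}

func main() {
	d, err := loadDigests(".versions/alpine")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("amd64 digests:", d["amd64"])
}
```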
@@ -174,7 +174,7 @@ services:
       # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.77.0
+    image: signoz/signoz:v0.78.1
     command:
       - --config=/root/config/prometheus.yml
       - --use-logs-new-schema=true
@@ -208,7 +208,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.111.37
+    image: signoz/signoz-otel-collector:v0.111.38
     command:
       - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
@@ -232,7 +232,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.111.37
+    image: signoz/signoz-schema-migrator:v0.111.38
     deploy:
       restart_policy:
         condition: on-failure

@@ -110,7 +110,7 @@ services:
       # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:v0.77.0
+    image: signoz/signoz:v0.78.1
     command:
       - --config=/root/config/prometheus.yml
       - --use-logs-new-schema=true
@@ -143,7 +143,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:v0.111.37
+    image: signoz/signoz-otel-collector:v0.111.38
     command:
       - --config=/etc/otel-collector-config.yaml
       - --manager-config=/etc/manager-config.yaml
@@ -167,7 +167,7 @@ services:
       - signoz
   schema-migrator:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:v0.111.37
+    image: signoz/signoz-schema-migrator:v0.111.38
     deploy:
       restart_policy:
         condition: on-failure

@@ -177,7 +177,7 @@ services:
       # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.77.0}
+    image: signoz/signoz:${VERSION:-v0.78.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -212,7 +212,7 @@ services:
   # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -238,7 +238,7 @@ services:
         condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -249,7 +249,7 @@ services:
         condition: service_healthy
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-async
     command:
       - async

@@ -110,7 +110,7 @@ services:
       # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.77.0}
+    image: signoz/signoz:${VERSION:-v0.78.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -146,7 +146,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -168,7 +168,7 @@ services:
         condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -180,7 +180,7 @@ services:
     restart: on-failure
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-async
     command:
       - async

@@ -110,7 +110,7 @@ services:
       # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
   signoz:
     !!merge <<: *db-depend
-    image: signoz/signoz:${VERSION:-v0.77.0}
+    image: signoz/signoz:${VERSION:-v0.78.1}
     container_name: signoz
     command:
       - --config=/root/config/prometheus.yml
@@ -144,7 +144,7 @@ services:
       retries: 3
   otel-collector:
     !!merge <<: *db-depend
-    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.111.38}
     container_name: signoz-otel-collector
     command:
       - --config=/etc/otel-collector-config.yaml
@@ -166,7 +166,7 @@ services:
         condition: service_healthy
   schema-migrator-sync:
     !!merge <<: *common
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-sync
     command:
       - sync
@@ -178,7 +178,7 @@ services:
     restart: on-failure
   schema-migrator-async:
     !!merge <<: *db-depend
-    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.37}
+    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.111.38}
     container_name: schema-migrator-async
     command:
      - async

@@ -18,4 +18,4 @@ COPY frontend/build/ /etc/signoz/web/
 RUN chmod 755 /root /root/signoz

 ENTRYPOINT ["./signoz"]
-CMD ["-config", "/root/config/prometheus.yml"]
+CMD ["-config", "/root/config/prometheus.yml"]
ee/query-service/Dockerfile.multi-arch (new file, 22 lines)
@@ -0,0 +1,22 @@
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"

FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG ARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*

COPY ./target/${OS}-${ARCH}/signoz /root/signoz
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["./signoz"]
CMD ["-config", "/root/config/prometheus.yml"]
@@ -153,9 +153,11 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
 func (ah *APIHandler) getOrCreateCloudIntegrationUser(
 	ctx context.Context, orgId string, cloudProvider string,
 ) (*types.User, *basemodel.ApiError) {
-	cloudIntegrationUserId := fmt.Sprintf("%s-integration", cloudProvider)
+	cloudIntegrationUser := fmt.Sprintf("%s-integration", cloudProvider)
+	email := fmt.Sprintf("%s@signoz.io", cloudIntegrationUser)

-	integrationUserResult, apiErr := ah.AppDao().GetUser(ctx, cloudIntegrationUserId)
+	// TODO(nitya): there should be orgId here
+	integrationUserResult, apiErr := ah.AppDao().GetUserByEmail(ctx, email)
 	if apiErr != nil {
 		return nil, basemodel.WrapApiError(apiErr, "couldn't look for integration user")
 	}
@@ -170,9 +172,9 @@ func (ah *APIHandler) getOrCreateCloudIntegrationUser(
 	)

 	newUser := &types.User{
-		ID:    cloudIntegrationUserId,
-		Name:  fmt.Sprintf("%s integration", cloudProvider),
-		Email: fmt.Sprintf("%s@signoz.io", cloudIntegrationUserId),
+		ID:    uuid.New().String(),
+		Name:  cloudIntegrationUser,
+		Email: email,
 		TimeAuditable: types.TimeAuditable{
 			CreatedAt: time.Now(),
 		},
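The change above switches the lookup key from a synthetic user ID to the integration user's deterministic email, and generates a random UUID only when the user is first created. The get-or-create shape, reduced to a self-contained sketch — `User` and `UserDao` are simplified stand-ins for the SigNoz types, not the real ones:

```go
package integration

import (
	"context"
	"fmt"

	"github.com/google/uuid"
)

// User and UserDao are simplified stand-ins for the SigNoz types.
type User struct {
	ID, Name, Email string
}

type UserDao interface {
	GetUserByEmail(ctx context.Context, email string) (*User, error) // nil, nil on miss
	CreateUser(ctx context.Context, u *User) error
}

// GetOrCreateByEmail mirrors the pattern in the diff: look the
// integration user up by a deterministic email, and create it on miss
// with a freshly generated random UUID as its ID.
func GetOrCreateByEmail(ctx context.Context, dao UserDao, cloudProvider string) (*User, error) {
	name := fmt.Sprintf("%s-integration", cloudProvider)
	email := fmt.Sprintf("%s@signoz.io", name)

	existing, err := dao.GetUserByEmail(ctx, email)
	if err != nil {
		return nil, err
	}
	if existing != nil {
		return existing, nil // already provisioned
	}

	u := &User{ID: uuid.New().String(), Name: name, Email: email}
	return u, dao.CreateUser(ctx, u)
}
```

The email, not the ID, becomes the stable key, so re-provisioning the same cloud provider finds the existing user even though IDs are now random.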
@@ -5,16 +5,18 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
+	"slices"
 	"time"

 	"github.com/SigNoz/signoz/ee/query-service/model"
-	"github.com/SigNoz/signoz/ee/types"
+	eeTypes "github.com/SigNoz/signoz/ee/types"
 	"github.com/SigNoz/signoz/pkg/errors"
+	errorsV2 "github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/http/render"
 	"github.com/SigNoz/signoz/pkg/query-service/auth"
 	baseconstants "github.com/SigNoz/signoz/pkg/query-service/constants"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
+	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/gorilla/mux"
 	"go.uber.org/zap"
@@ -58,7 +60,7 @@ func (ah *APIHandler) createPAT(w http.ResponseWriter, r *http.Request) {
 	ah.Respond(w, &pat)
 }

-func validatePATRequest(req types.GettablePAT) error {
+func validatePATRequest(req eeTypes.GettablePAT) error {
 	if req.Role == "" || (req.Role != baseconstants.ViewerGroup && req.Role != baseconstants.EditorGroup && req.Role != baseconstants.AdminGroup) {
 		return fmt.Errorf("valid role is required")
 	}
@@ -74,12 +76,19 @@ func validatePATRequest(req types.GettablePAT) error {
 func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
 	ctx := context.Background()

-	req := types.GettablePAT{}
+	req := eeTypes.GettablePAT{}
 	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
 		RespondError(w, model.BadRequest(err), nil)
 		return
 	}

+	idStr := mux.Vars(r)["id"]
+	id, err := valuer.NewUUID(idStr)
+	if err != nil {
+		render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "id is not a valid uuid-v7"))
+		return
+	}
+
 	user, err := auth.GetUserFromReqContext(r.Context())
 	if err != nil {
 		RespondError(w, &model.ApiError{
@@ -89,6 +98,25 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	//get the pat
+	existingPAT, paterr := ah.AppDao().GetPATByID(ctx, user.OrgID, id)
+	if paterr != nil {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, paterr.Error()))
+		return
+	}
+
+	// get the user
+	createdByUser, usererr := ah.AppDao().GetUser(ctx, existingPAT.UserID)
+	if usererr != nil {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, usererr.Error()))
+		return
+	}
+
+	if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email)) {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user pat cannot be updated"))
+		return
+	}
+
 	err = validatePATRequest(req)
 	if err != nil {
 		RespondError(w, model.BadRequest(err), nil)
@@ -96,12 +124,6 @@ func (ah *APIHandler) updatePAT(w http.ResponseWriter, r *http.Request) {
 	}

 	req.UpdatedByUserID = user.ID
-	idStr := mux.Vars(r)["id"]
-	id, err := valuer.NewUUID(idStr)
-	if err != nil {
-		render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "id is not a valid uuid-v7"))
-		return
-	}
 	req.UpdatedAt = time.Now()
 	zap.L().Info("Got Update PAT request", zap.Any("pat", req))
 	var apierr basemodel.BaseApiError
@@ -149,6 +171,25 @@ func (ah *APIHandler) revokePAT(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	//get the pat
+	existingPAT, paterr := ah.AppDao().GetPATByID(ctx, user.OrgID, id)
+	if paterr != nil {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, paterr.Error()))
+		return
+	}
+
+	// get the user
+	createdByUser, usererr := ah.AppDao().GetUser(ctx, existingPAT.UserID)
+	if usererr != nil {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, usererr.Error()))
+		return
+	}
+
+	if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email)) {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user pat cannot be updated"))
+		return
+	}
+
 	zap.L().Info("Revoke PAT with id", zap.String("id", id.StringValue()))
 	if apierr := ah.AppDao().RevokePAT(ctx, user.OrgID, id, user.ID); apierr != nil {
 		RespondError(w, apierr, nil)
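Both `updatePAT` and `revokePAT` now inline the same three-step guard: fetch the PAT, fetch its creator, and refuse to touch tokens owned by cloud-integration users. One way that check could be factored out — a sketch with simplified stand-in types, not the code SigNoz ships:

```go
package pat

import (
	"context"
	"fmt"
	"slices"
)

// Simplified stand-ins for the SigNoz types used in the diff above.
type UUID = string

type GettablePAT struct{ UserID string }

type User struct{ Email string }

type Dao interface {
	GetPATByID(ctx context.Context, orgID string, id UUID) (*GettablePAT, error)
	GetUser(ctx context.Context, id string) (*User, error)
}

// AllIntegrationUserEmails stands in for the list the real code checks;
// the concrete address here is an assumed example value.
var AllIntegrationUserEmails = []string{"aws-integration@signoz.io"}

// EnsureNotIntegrationPAT factors out the guard both handlers repeat:
// resolve the PAT's creator and reject tokens created by integration users.
func EnsureNotIntegrationPAT(ctx context.Context, dao Dao, orgID string, id UUID) error {
	pat, err := dao.GetPATByID(ctx, orgID, id)
	if err != nil {
		return err
	}
	creator, err := dao.GetUser(ctx, pat.UserID)
	if err != nil {
		return err
	}
	if slices.Contains(AllIntegrationUserEmails, creator.Email) {
		return fmt.Errorf("integration user pat cannot be updated")
	}
	return nil
}
```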
@@ -8,7 +8,6 @@ import (
 	basedao "github.com/SigNoz/signoz/pkg/query-service/dao"
 	baseint "github.com/SigNoz/signoz/pkg/query-service/interfaces"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
-	ossTypes "github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
 	"github.com/google/uuid"
@@ -40,7 +39,6 @@ type ModelDao interface {
 	UpdatePAT(ctx context.Context, orgID string, p types.GettablePAT, id valuer.UUID) basemodel.BaseApiError
 	GetPAT(ctx context.Context, pat string) (*types.GettablePAT, basemodel.BaseApiError)
 	GetPATByID(ctx context.Context, orgID string, id valuer.UUID) (*types.GettablePAT, basemodel.BaseApiError)
-	GetUserByPAT(ctx context.Context, orgID string, token string) (*ossTypes.GettableUser, basemodel.BaseApiError)
 	ListPATs(ctx context.Context, orgID string) ([]types.GettablePAT, basemodel.BaseApiError)
 	RevokePAT(ctx context.Context, orgID string, id valuer.UUID, userID string) basemodel.BaseApiError
 }
@@ -44,7 +44,7 @@ func (m *modelDao) createUserForSAMLRequest(ctx context.Context, email string) (
 	}

 	user := &types.User{
-		ID:       uuid.NewString(),
+		ID:       uuid.New().String(),
 		Name:     "",
 		Email:    email,
 		Password: hash,
@@ -196,27 +196,3 @@ func (m *modelDao) GetPATByID(ctx context.Context, orgID string, id valuer.UUID)

 	return &patWithUser, nil
 }
-
-// deprecated
-func (m *modelDao) GetUserByPAT(ctx context.Context, orgID string, token string) (*ossTypes.GettableUser, basemodel.BaseApiError) {
-	users := []ossTypes.GettableUser{}
-
-	if err := m.DB().NewSelect().
-		Model(&users).
-		Column("u.id", "u.name", "u.email", "u.password", "u.created_at", "u.profile_picture_url", "u.org_id", "u.group_id").
-		Join("JOIN personal_access_tokens p ON u.id = p.user_id").
-		Where("p.token = ?", token).
-		Where("p.expires_at >= strftime('%s', 'now')").
-		Where("p.org_id = ?", orgID).
-		Scan(ctx); err != nil {
-		return nil, model.InternalError(fmt.Errorf("failed to fetch user from PAT, err: %v", err))
-	}
-
-	if len(users) != 1 {
-		return nil, &model.ApiError{
-			Typ: model.ErrorInternal,
-			Err: fmt.Errorf("found zero or multiple users with same PAT token"),
-		}
-	}
-	return &users[0], nil
-}
@@ -157,8 +157,6 @@ func NewLicenseV3(data map[string]interface{}) (*LicenseV3, error) {
 	}

 	switch planName {
-	case PlanNameTeams:
-		features = append(features, ProPlan...)
 	case PlanNameEnterprise:
 		features = append(features, EnterprisePlan...)
 	case PlanNameBasic:
@@ -74,21 +74,21 @@ func TestNewLicenseV3(t *testing.T) {
 		},
 		{
 			name: "Parse the entire license properly",
-			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
+			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"ENTERPRISE"},"valid_from": 1730899309,"valid_until": -1}`),
 			pass: true,
 			expected: &LicenseV3{
 				ID:  "does-not-matter",
 				Key: "does-not-matter-key",
 				Data: map[string]interface{}{
 					"plan": map[string]interface{}{
-						"name": "TEAMS",
+						"name": "ENTERPRISE",
 					},
 					"category":    "FREE",
 					"status":      "ACTIVE",
 					"valid_from":  float64(1730899309),
 					"valid_until": float64(-1),
 				},
-				PlanName:   PlanNameTeams,
+				PlanName:   PlanNameEnterprise,
 				ValidFrom:  1730899309,
 				ValidUntil: -1,
 				Status:     "ACTIVE",
@@ -98,14 +98,14 @@ func TestNewLicenseV3(t *testing.T) {
 		},
 		{
 			name: "Fallback to basic plan if license status is invalid",
-			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INVALID","plan":{"name":"TEAMS"},"valid_from": 1730899309,"valid_until": -1}`),
+			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"INVALID","plan":{"name":"ENTERPRISE"},"valid_from": 1730899309,"valid_until": -1}`),
 			pass: true,
 			expected: &LicenseV3{
 				ID:  "does-not-matter",
 				Key: "does-not-matter-key",
 				Data: map[string]interface{}{
 					"plan": map[string]interface{}{
-						"name": "TEAMS",
+						"name": "ENTERPRISE",
 					},
 					"category": "FREE",
 					"status":   "INVALID",
@@ -122,21 +122,21 @@ func TestNewLicenseV3(t *testing.T) {
 		},
 		{
 			name: "fallback states for validFrom and validUntil",
-			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"TEAMS"},"valid_from":1234.456,"valid_until":5678.567}`),
+			data: []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"ENTERPRISE"},"valid_from":1234.456,"valid_until":5678.567}`),
 			pass: true,
 			expected: &LicenseV3{
 				ID:  "does-not-matter",
 				Key: "does-not-matter-key",
 				Data: map[string]interface{}{
 					"plan": map[string]interface{}{
-						"name": "TEAMS",
+						"name": "ENTERPRISE",
 					},
 					"valid_from":  1234.456,
 					"valid_until": 5678.567,
 					"category":    "FREE",
 					"status":      "ACTIVE",
 				},
-				PlanName:   PlanNameTeams,
+				PlanName:   PlanNameEnterprise,
 				ValidFrom:  1234,
 				ValidUntil: 5678,
 				Status:     "ACTIVE",
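The test payloads fix the wire shape of a v3 license. A minimal sketch of decoding that shape — the field names and the example payload come from the tests above, while the struct itself is an assumption for illustration, not the SigNoz type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// licensePayload mirrors the JSON used in the tests above.
type licensePayload struct {
	ID       string `json:"id"`
	Key      string `json:"key"`
	Category string `json:"category"`
	Status   string `json:"status"`
	Plan     struct {
		Name string `json:"name"`
	} `json:"plan"`
	ValidFrom  float64 `json:"valid_from"`  // may arrive as a float on the wire
	ValidUntil float64 `json:"valid_until"` // -1 means no expiry
}

func main() {
	data := []byte(`{"id":"does-not-matter","key":"does-not-matter-key","category":"FREE","status":"ACTIVE","plan":{"name":"ENTERPRISE"},"valid_from":1234.456,"valid_until":5678.567}`)

	var lic licensePayload
	if err := json.Unmarshal(data, &lic); err != nil {
		panic(err)
	}
	// The tests expect float timestamps to be truncated: 1234.456 -> 1234.
	fmt.Println(lic.Plan.Name, int64(lic.ValidFrom), int64(lic.ValidUntil))
}
```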
@@ -1,30 +1,26 @@
 package model

 import (
-	"github.com/SigNoz/signoz/pkg/query-service/constants"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
 )

 const SSO = "SSO"
 const Basic = "BASIC_PLAN"
-const Pro = "PRO_PLAN"
 const Enterprise = "ENTERPRISE_PLAN"

 var (
 	PlanNameEnterprise = "ENTERPRISE"
-	PlanNameTeams      = "TEAMS"
 	PlanNameBasic      = "BASIC"
 )

 var (
-	MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameTeams: Pro, PlanNameEnterprise: Enterprise}
+	MapOldPlanKeyToNewPlanName map[string]string = map[string]string{PlanNameBasic: Basic, PlanNameEnterprise: Enterprise}
 )

 var (
 	LicenseStatusInvalid = "INVALID"
 )

-const DisableUpsell = "DISABLE_UPSELL"
 const Onboarding = "ONBOARDING"
 const ChatSupport = "CHAT_SUPPORT"
 const Gateway = "GATEWAY"
@@ -38,83 +34,6 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       basemodel.OSS,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       DisableUpsell,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.CustomMetricsFunction,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderPanels,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderAlerts,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelSlack,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelWebhook,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelPagerduty,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelOpsgenie,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelEmail,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelMsTeams,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 	basemodel.Feature{
 		Name:       basemodel.UseSpanMetrics,
 		Active:     false,
@@ -143,135 +62,6 @@ var BasicPlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       basemodel.HostsInfraMonitoring,
-		Active:     constants.EnableHostsInfraMonitoring(),
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.TraceFunnels,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-}
-
-var ProPlan = basemodel.FeatureSet{
-	basemodel.Feature{
-		Name:       SSO,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.OSS,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.CustomMetricsFunction,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderPanels,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderAlerts,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelSlack,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelWebhook,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelPagerduty,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelOpsgenie,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelEmail,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelMsTeams,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.UseSpanMetrics,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       Gateway,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       PremiumSupport,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AnomalyDetection,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.HostsInfraMonitoring,
-		Active:     constants.EnableHostsInfraMonitoring(),
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 	basemodel.Feature{
 		Name:       basemodel.TraceFunnels,
 		Active:     false,
@@ -289,76 +79,6 @@ var EnterprisePlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       basemodel.OSS,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.CustomMetricsFunction,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderPanels,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.QueryBuilderAlerts,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelSlack,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelWebhook,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelPagerduty,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelOpsgenie,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelEmail,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	basemodel.Feature{
-		Name:       basemodel.AlertChannelMsTeams,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 	basemodel.Feature{
 		Name:       basemodel.UseSpanMetrics,
 		Active:     false,
@@ -401,13 +121,6 @@ var EnterprisePlan = basemodel.FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	basemodel.Feature{
-		Name:       basemodel.HostsInfraMonitoring,
-		Active:     constants.EnableHostsInfraMonitoring(),
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 	basemodel.Feature{
 		Name:       basemodel.TraceFunnels,
 		Active:     false,
@@ -17,13 +17,15 @@ var (
 )

 var (
-	Org  = "org"
-	User = "user"
+	Org              = "org"
+	User             = "user"
+	CloudIntegration = "cloud_integration"
 )

 var (
-	OrgReference  = `("org_id") REFERENCES "organizations" ("id")`
-	UserReference = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
+	OrgReference              = `("org_id") REFERENCES "organizations" ("id")`
+	UserReference             = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
+	CloudIntegrationReference = `("cloud_integration_id") REFERENCES "cloud_integration" ("id") ON DELETE CASCADE`
 )

 type dialect struct {
@@ -211,6 +213,8 @@ func (dialect *dialect) RenameTableAndModifyModel(ctx context.Context, bun bun.I
 			fkReferences = append(fkReferences, OrgReference)
 		} else if reference == User && !slices.Contains(fkReferences, UserReference) {
 			fkReferences = append(fkReferences, UserReference)
+		} else if reference == CloudIntegration && !slices.Contains(fkReferences, CloudIntegrationReference) {
+			fkReferences = append(fkReferences, CloudIntegrationReference)
 		}
 	}
@@ -11,9 +11,12 @@ const logEvent = async (
 	rateLimited?: boolean,
 ): Promise<SuccessResponse<EventSuccessPayloadProps> | ErrorResponse> => {
 	try {
+		// add tenant_url to attributes
+		const { hostname } = window.location;
+		const updatedAttributes = { ...attributes, tenant_url: hostname };
 		const response = await axios.post('/event', {
 			eventName,
-			attributes,
+			attributes: updatedAttributes,
 			eventType: eventType || 'track',
 			rateLimited: rateLimited || false, // TODO: Update this once we have a proper way to handle rate limiting
 		});
@@ -1,27 +1,12 @@
-// keep this consistent with backend constants.go
 export enum FeatureKeys {
 	SSO = 'SSO',
-	ENTERPRISE_PLAN = 'ENTERPRISE_PLAN',
-	BASIC_PLAN = 'BASIC_PLAN',
-	ALERT_CHANNEL_SLACK = 'ALERT_CHANNEL_SLACK',
-	ALERT_CHANNEL_WEBHOOK = 'ALERT_CHANNEL_WEBHOOK',
-	ALERT_CHANNEL_PAGERDUTY = 'ALERT_CHANNEL_PAGERDUTY',
-	ALERT_CHANNEL_OPSGENIE = 'ALERT_CHANNEL_OPSGENIE',
-	ALERT_CHANNEL_MSTEAMS = 'ALERT_CHANNEL_MSTEAMS',
-	CUSTOM_METRICS_FUNCTION = 'CUSTOM_METRICS_FUNCTION',
-	QUERY_BUILDER_PANELS = 'QUERY_BUILDER_PANELS',
-	QUERY_BUILDER_ALERTS = 'QUERY_BUILDER_ALERTS',
-	DISABLE_UPSELL = 'DISABLE_UPSELL',
-	USE_SPAN_METRICS = 'USE_SPAN_METRICS',
-	OSS = 'OSS',
 	ONBOARDING = 'ONBOARDING',
 	CHAT_SUPPORT = 'CHAT_SUPPORT',
 	GATEWAY = 'GATEWAY',
 	PREMIUM_SUPPORT = 'PREMIUM_SUPPORT',
-	QUERY_BUILDER_SEARCH_V2 = 'QUERY_BUILDER_SEARCH_V2',
 	ANOMALY_DETECTION = 'ANOMALY_DETECTION',
 	AWS_INTEGRATION = 'AWS_INTEGRATION',
 	ONBOARDING_V3 = 'ONBOARDING_V3',
 	THIRD_PARTY_API = 'THIRD_PARTY_API',
 	TRACE_FUNNELS = 'TRACE_FUNNELS',
 }
@@ -1,6 +1,5 @@
 import { Form, FormInstance, Input, Select, Switch, Typography } from 'antd';
 import { Store } from 'antd/lib/form/interface';
-import { FeatureKeys } from 'constants/features';
 import ROUTES from 'constants/routes';
 import {
 	ChannelType,
@@ -11,11 +10,8 @@ import {
 	WebhookChannel,
 } from 'container/CreateAlertChannels/config';
 import history from 'lib/history';
-import { useAppContext } from 'providers/App/App';
 import { Dispatch, ReactElement, SetStateAction } from 'react';
 import { useTranslation } from 'react-i18next';
-import { FeatureFlagProps } from 'types/api/features/getFeaturesFlags';
-import { isFeatureKeys } from 'utils/app';

 import EmailSettings from './Settings/Email';
 import MsTeamsSettings from './Settings/MsTeams';
@@ -39,17 +35,6 @@ function FormAlertChannels({
 	editing = false,
 }: FormAlertChannelsProps): JSX.Element {
 	const { t } = useTranslation('channels');
-	const { featureFlags } = useAppContext();
-
-	const feature = `ALERT_CHANNEL_${type.toUpperCase()}`;
-
-	const featureKey = isFeatureKeys(feature)
-		? feature
-		: FeatureKeys.ALERT_CHANNEL_SLACK;
-
-	const hasFeature = featureFlags?.find(
-		(flag: FeatureFlagProps) => flag.name === featureKey,
-	);

 	const renderSettings = (): ReactElement | null => {
 		switch (type) {
@@ -146,7 +131,7 @@ function FormAlertChannels({

 			<Form.Item>
 				<Button
-					disabled={savingState || !hasFeature}
+					disabled={savingState}
 					loading={savingState}
 					type="primary"
 					onClick={(): void => onSaveHandler(type)}
@@ -154,7 +139,7 @@ function FormAlertChannels({
 					{t('button_save_channel')}
 				</Button>
 				<Button
-					disabled={testingState || !hasFeature}
+					disabled={testingState}
 					loading={testingState}
 					onClick={(): void => onTestHandler(type)}
 				>
@@ -467,10 +467,6 @@ function FormAlertRules({
 		panelType,
 	]);

-	const isAlertAvailable =
-		!featureFlags?.find((flag) => flag.name === FeatureKeys.QUERY_BUILDER_ALERTS)
-			?.active || false;
-
 	const saveRule = useCallback(async () => {
 		if (!isFormValid()) {
 			return;
@@ -688,11 +684,6 @@ function FormAlertRules({

 	const isAlertNameMissing = !formInstance.getFieldValue('alert');

-	const isAlertAvailableToSave =
-		isAlertAvailable &&
-		currentQuery.queryType === EQueryType.QUERY_BUILDER &&
-		alertType !== AlertTypes.METRICS_BASED_ALERT;
-
 	const onUnitChangeHandler = (value: string): void => {
 		setYAxisUnit(value);
 		// reset target unit
@@ -865,7 +856,6 @@ function FormAlertRules({
 					icon={<SaveOutlined />}
 					disabled={
 						isAlertNameMissing ||
-						isAlertAvailableToSave ||
 						!isChannelConfigurationValid ||
 						queryStatus === 'error'
 					}
@@ -5,7 +5,6 @@ import { WarningOutlined } from '@ant-design/icons';
 import { Button, Flex, Modal, Space, Typography } from 'antd';
 import logEvent from 'api/common/logEvent';
 import OverlayScrollbar from 'components/OverlayScrollbar/OverlayScrollbar';
-import { FeatureKeys } from 'constants/features';
 import { QueryParams } from 'constants/query';
 import {
 	initialQueriesMap,
@@ -27,7 +26,6 @@ import { GetQueryResultsProps } from 'lib/dashboard/getQueryResults';
 import { cloneDeep, defaultTo, isEmpty, isUndefined } from 'lodash-es';
 import { Check, X } from 'lucide-react';
 import { DashboardWidgetPageParams } from 'pages/DashboardWidget';
-import { useAppContext } from 'providers/App/App';
 import { useDashboard } from 'providers/Dashboard/Dashboard';
 import {
 	getNextWidgets,
@@ -79,8 +77,6 @@ function NewWidget({ selectedGraph }: NewWidgetProps): JSX.Element {

 	const { t } = useTranslation(['dashboard']);

-	const { featureFlags } = useAppContext();
-
 	const { registerShortcut, deregisterShortcut } = useKeyboardHotkeys();

 	const {
@@ -566,12 +562,7 @@ function NewWidget({ selectedGraph }: NewWidgetProps): JSX.Element {
 		// eslint-disable-next-line react-hooks/exhaustive-deps
 	}, []);

-	const isQueryBuilderActive =
-		!featureFlags?.find((flag) => flag.name === FeatureKeys.QUERY_BUILDER_PANELS)
-			?.active || false;
-
 	const isNewTraceLogsAvailable =
-		isQueryBuilderActive &&
 		currentQuery.queryType === EQueryType.QUERY_BUILDER &&
 		currentQuery.builder.queryData.find(
 			(query) => query.dataSource !== DataSource.METRICS,
@@ -13,11 +13,7 @@ function OrganizationSettings(): JSX.Element {
|
||||
const isNotSSO =
|
||||
!featureFlags?.find((flag) => flag.name === FeatureKeys.SSO)?.active || false;
|
||||
|
||||
const isNoUpSell =
|
||||
!featureFlags?.find((flag) => flag.name === FeatureKeys.DISABLE_UPSELL)
|
||||
?.active || false;
|
||||
|
||||
const isAuthDomain = !isNoUpSell || (isNoUpSell && !isNotSSO);
|
||||
const isAuthDomain = !isNotSSO;
|
||||
|
||||
if (!org) {
|
||||
return <div />;
|
||||
|
||||
@@ -284,16 +284,6 @@ function SideNav(): JSX.Element {
|
||||
manageLicenseMenuItem,
|
||||
];
|
||||
|
||||
const isApiMonitoringEnabled = featureFlags?.find(
|
||||
(flag) => flag.name === FeatureKeys.THIRD_PARTY_API,
|
||||
)?.active;
|
||||
|
||||
if (!isApiMonitoringEnabled) {
|
||||
updatedMenuItems = updatedMenuItems.filter(
|
||||
(item) => item.key !== ROUTES.API_MONITORING,
|
||||
);
|
||||
}
|
||||
|
||||
if (isCloudUser || isEnterpriseSelfHostedUser) {
|
||||
const isOnboardingEnabled =
|
||||
featureFlags?.find((feature) => feature.name === FeatureKeys.ONBOARDING)
|
||||
|
||||
@@ -186,76 +186,6 @@ export function getAppContextMock(
|
||||
usage_limit: -1,
|
||||
route: '',
|
||||
},
|
||||
{
|
||||
name: FeatureKeys.OSS,
|
||||
active: false,
|
||||
usage: 0,
|
||||
usage_limit: -1,
|
||||
route: '',
|
||||
},
|
||||
{
|
||||
name: FeatureKeys.DISABLE_UPSELL,
|
||||
active: false,
|
||||
usage: 0,
|
||||
usage_limit: -1,
|
||||
route: '',
|
||||
},
|
||||
{
|
||||
name: FeatureKeys.CUSTOM_METRICS_FUNCTION,
|
||||
active: true,
|
||||
usage: 0,
|
||||
usage_limit: -1,
|
||||
route: '',
|
||||
},
|
||||
{
|
||||
name: FeatureKeys.QUERY_BUILDER_PANELS,
|
||||
active: true,
|
||||
usage: 0,
|
||||
usage_limit: -1,
|
||||
route: '',
|
||||
},
|
||||
{
|
||||
name: FeatureKeys.QUERY_BUILDER_ALERTS,
|
||||
active: true,
|
||||
usage: 0,
|
||||
usage_limit: -1,
|
||||
route: '',
|
||||
},
|
||||
{
|
||||
name: FeatureKeys.ALERT_CHANNEL_SLACK,
|
||||
active: true,
|
||||
usage: 0,
|
||||
usage_limit: -1,
|
||||
route: '',
|
||||
},
|
||||
{
|
||||
name: FeatureKeys.ALERT_CHANNEL_WEBHOOK,
|
||||
active: true,
|
||||
usage: 0,
|
||||
usage_limit: -1,
|
||||
route: '',
|
||||
},
|
||||
{
|
||||
name: FeatureKeys.ALERT_CHANNEL_PAGERDUTY,
|
||||
active: true,
|
||||
usage: 0,
|
||||
usage_limit: -1,
|
||||
route: '',
|
||||
},
|
||||
{
|
||||
name: FeatureKeys.ALERT_CHANNEL_OPSGENIE,
|
||||
active: true,
|
||||
usage: 0,
|
||||
usage_limit: -1,
|
||||
route: '',
|
||||
},
|
||||
{
|
||||
name: FeatureKeys.ALERT_CHANNEL_MSTEAMS,
|
||||
active: true,
|
||||
usage: 0,
|
||||
usage_limit: -1,
|
||||
route: '',
|
||||
},
|
||||
{
|
||||
name: FeatureKeys.USE_SPAN_METRICS,
|
||||
active: false,
|
||||
|
||||
3
go.mod
@@ -28,6 +28,7 @@ require (
	github.com/gorilla/mux v1.8.1
	github.com/gorilla/websocket v1.5.0
	github.com/gosimple/slug v1.10.0
	github.com/huandu/go-sqlbuilder v1.35.0
	github.com/jackc/pgx/v5 v5.7.2
	github.com/jmoiron/sqlx v1.3.4
	github.com/json-iterator/go v1.1.12
@@ -91,6 +92,7 @@ require (
	github.com/ClickHouse/ch-go v0.61.5 // indirect
	github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
	github.com/andybalholm/brotli v1.1.1 // indirect
	github.com/antlr4-go/antlr/v4 v4.13.1 // indirect
	github.com/armon/go-metrics v0.4.1 // indirect
	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
	github.com/aws/aws-sdk-go v1.55.5 // indirect
@@ -150,6 +152,7 @@ require (
	github.com/hashicorp/golang-lru v1.0.2 // indirect
	github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
	github.com/hashicorp/memberlist v0.5.1 // indirect
	github.com/huandu/xstrings v1.4.0 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/jackc/pgpassfile v1.0.0 // indirect
	github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
8
go.sum
@@ -113,6 +113,8 @@ github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ=
github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw=
github.com/antonmedv/expr v1.15.3 h1:q3hOJZNvLvhqE8OHBs1cFRdbXFNKuA+bHmRaI+AmRmI=
github.com/antonmedv/expr v1.15.3/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@@ -537,6 +539,12 @@ github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOF
github.com/hjson/hjson-go/v4 v4.0.0 h1:wlm6IYYqHjOdXH1gHev4VoXCaW20HdQAGCxdOEEg2cs=
github.com/hjson/hjson-go/v4 v4.0.0/go.mod h1:KaYt3bTw3zhBjYqnXkYywcYctk0A2nxeEFTse3rH13E=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/go-assert v1.1.6 h1:oaAfYxq9KNDi9qswn/6aE0EydfxSa+tWZC1KabNitYs=
github.com/huandu/go-assert v1.1.6/go.mod h1:JuIfbmYG9ykwvuxoJ3V8TB5QP+3+ajIA54Y44TmkMxs=
github.com/huandu/go-sqlbuilder v1.35.0 h1:ESvxFHN8vxCTudY1Vq63zYpU5yJBESn19sf6k4v2T5Q=
github.com/huandu/go-sqlbuilder v1.35.0/go.mod h1:mS0GAtrtW+XL6nM2/gXHRJax2RwSW1TraavWDFAc1JA=
github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
221
grammar/FilterQuery.g4
Normal file
@@ -0,0 +1,221 @@
grammar FilterQuery;

/*
 * Parser Rules
 */

query
    : expression
      EOF
    ;

// Expression with standard boolean precedence:
// - parentheses > NOT > AND > OR
// - consecutive expressions with no AND/OR => implicit AND
expression
    : orExpression
    ;

// OR expressions
orExpression
    : andExpression ( OR andExpression )*
    ;

// AND expressions + optional chaining with implicit AND if no OR is present
andExpression
    : unaryExpression ( AND unaryExpression | unaryExpression )*
    ;

// A unary expression handles optional NOT
unaryExpression
    : NOT? primary
    ;

// Primary constructs: grouped expressions, a comparison (key op value),
// a function call, or a full-text string
primary
    : LPAREN orExpression RPAREN
    | comparison
    | functionCall
    | fullText
    | key
    ;

/*
 * Comparison-like filters
 *
 * Includes all operators: =, !=, <>, <, <=, >, >=, [NOT] LIKE, [NOT] ILIKE,
 * [NOT] BETWEEN, [NOT] IN, [NOT] EXISTS, [NOT] REGEXP, [NOT] CONTAINS, etc.
 */
comparison
    : key EQUALS value
    | key (NOT_EQUALS | NEQ) value
    | key LT value
    | key LE value
    | key GT value
    | key GE value

    | key (LIKE | ILIKE) value
    | key (NOT_LIKE | NOT_ILIKE) value

    | key BETWEEN value AND value
    | key NOT BETWEEN value AND value

    | key inClause
    | key notInClause

    | key EXISTS
    | key NOT EXISTS

    | key REGEXP value
    | key NOT REGEXP value

    | key CONTAINS value
    | key NOT CONTAINS value
    ;

// in(...) or in[...]
inClause
    : IN LPAREN valueList RPAREN
    | IN LBRACK valueList RBRACK
    ;

notInClause
    : NOT IN LPAREN valueList RPAREN
    | NOT IN LBRACK valueList RBRACK
    ;

// List of values for in(...) or in[...]
valueList
    : value ( COMMA value )*
    ;

// Full-text search: a standalone quoted string is allowed as a "primary"
// e.g. `"Waiting for response" http.status_code=200`
fullText
    : QUOTED_TEXT
    | FREETEXT
    ;

/*
 * Function calls like:
 *   has(payload.user_ids, 123)
 *   hasAny(payload.user_ids, [123, 456])
 *   ...
 */
functionCall
    : (HAS | HASANY | HASALL | HASNONE) LPAREN functionParamList RPAREN
    ;

// Function parameters can be keys, single scalar values, or arrays
functionParamList
    : functionParam ( COMMA functionParam )*
    ;

functionParam
    : key
    | value
    | array
    ;

// An array: [ item1, item2, item3 ]
array
    : LBRACK valueList RBRACK
    ;

/*
 * A 'value' can be a string literal (double or single-quoted),
 * a numeric literal, boolean, or a "bare" token as needed.
 */
value
    : QUOTED_TEXT
    | NUMBER
    | BOOL
    | KEY
    ;

/*
 * A key can include letters, digits, underscores, dots, brackets
 * E.g. service.name, query_log.query_duration_ms, proto.user_objects[].name
 */
key
    : KEY
    ;


/*
 * Lexer Rules
 */

// Common punctuation / symbols
LPAREN : '(' ;
RPAREN : ')' ;
LBRACK : '[' ;
RBRACK : ']' ;
COMMA : ',' ;

EQUALS : '=' | '==' ;
NOT_EQUALS : '!=' ;
NEQ : '<>' ; // alternate not-equals operator
LT : '<' ;
LE : '<=' ;
GT : '>' ;
GE : '>=' ;

// Operators that are made of multiple keywords
LIKE : [Ll][Ii][Kk][Ee] ;
NOT_LIKE : [Nn][Oo][Tt] [ \t]+ [Ll][Ii][Kk][Ee] ;
ILIKE : [Ii][Ll][Ii][Kk][Ee] ;
NOT_ILIKE : [Nn][Oo][Tt] [ \t]+ [Ii][Ll][Ii][Kk][Ee] ;
BETWEEN : [Bb][Ee][Tt][Ww][Ee][Ee][Nn] ;
EXISTS : [Ee][Xx][Ii][Ss][Tt][Ss]? ;
REGEXP : [Rr][Ee][Gg][Ee][Xx][Pp] ;
CONTAINS : [Cc][Oo][Nn][Tt][Aa][Ii][Nn][Ss]? ;
IN : [Ii][Nn] ;

// Boolean logic
NOT : [Nn][Oo][Tt] ;
AND : [Aa][Nn][Dd] ;
OR : [Oo][Rr] ;

// For easy referencing in function calls
HAS : [Hh][Aa][Ss] ;
HASANY : [Hh][Aa][Ss][Aa][Nn][Yy] ;
HASALL : [Hh][Aa][Ss][Aa][Ll][Ll] ;
HASNONE : [Hh][Aa][Ss][Nn][Oo][Nn][Ee] ;

// Potential boolean constants
BOOL
    : [Tt][Rr][Uu][Ee]
    | [Ff][Aa][Ll][Ss][Ee]
    ;

// Numbers (integer or float). Adjust as needed for your domain.
NUMBER
    : DIGIT+ ( '.' DIGIT+ )?
    ;

// Double/single-quoted text, capturing full text search strings, values, etc.
QUOTED_TEXT
    : ( '"' ( ~["\\] | '\\' . )* '"' // double-quoted
      | '\'' ( ~['\\] | '\\' . )* '\'' // single-quoted
      )
    ;

// Keys can have letters, digits, underscores, dots, and bracket pairs
// e.g. service.name, service.namespace, db.queries[].query_duration
KEY
    : [a-zA-Z0-9_] [a-zA-Z0-9_.[\]]*
    ;

// Ignore whitespace
WS
    : [ \t\r\n]+ -> skip
    ;

// Digits used by NUMBER
fragment DIGIT
    : [0-9]
    ;

FREETEXT : (~[ \t\r\n=()'"<>![\]])+ ;
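For orientation, here are a few filter expressions the grammar above accepts (illustrative examples written against these rules, not taken from the repository's tests):

service.name = "frontend" AND duration_nano > 500000
http.status_code IN (500, 502, 503) OR has(payload.user_ids, 123)
NOT (k8s.namespace = 'default') "timed out waiting"

The last example leans on the implicit-AND chaining in andExpression: the grouped comparison and the trailing full-text string combine without an explicit AND.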
96
pkg/parser/grammar/FilterQuery.interp
Normal file
File diff suppressed because one or more lines are too long
45
pkg/parser/grammar/FilterQuery.tokens
Normal file
@@ -0,0 +1,45 @@
LPAREN=1
RPAREN=2
LBRACK=3
RBRACK=4
COMMA=5
EQUALS=6
NOT_EQUALS=7
NEQ=8
LT=9
LE=10
GT=11
GE=12
LIKE=13
NOT_LIKE=14
ILIKE=15
NOT_ILIKE=16
BETWEEN=17
EXISTS=18
REGEXP=19
CONTAINS=20
IN=21
NOT=22
AND=23
OR=24
HAS=25
HASANY=26
HASALL=27
HASNONE=28
BOOL=29
NUMBER=30
QUOTED_TEXT=31
KEY=32
WS=33
FREETEXT=34
'('=1
')'=2
'['=3
']'=4
','=5
'!='=7
'<>'=8
'<'=9
'<='=10
'>'=11
'>='=12
120
pkg/parser/grammar/FilterQueryLexer.interp
Normal file
File diff suppressed because one or more lines are too long
45
pkg/parser/grammar/FilterQueryLexer.tokens
Normal file
@@ -0,0 +1,45 @@
LPAREN=1
RPAREN=2
LBRACK=3
RBRACK=4
COMMA=5
EQUALS=6
NOT_EQUALS=7
NEQ=8
LT=9
LE=10
GT=11
GE=12
LIKE=13
NOT_LIKE=14
ILIKE=15
NOT_ILIKE=16
BETWEEN=17
EXISTS=18
REGEXP=19
CONTAINS=20
IN=21
NOT=22
AND=23
OR=24
HAS=25
HASANY=26
HASALL=27
HASNONE=28
BOOL=29
NUMBER=30
QUOTED_TEXT=31
KEY=32
WS=33
FREETEXT=34
'('=1
')'=2
'['=3
']'=4
','=5
'!='=7
'<>'=8
'<'=9
'<='=10
'>'=11
'>='=12
124
pkg/parser/grammar/filterquery_base_listener.go
Normal file
@@ -0,0 +1,124 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

// BaseFilterQueryListener is a complete listener for a parse tree produced by FilterQueryParser.
type BaseFilterQueryListener struct{}

var _ FilterQueryListener = &BaseFilterQueryListener{}

// VisitTerminal is called when a terminal node is visited.
func (s *BaseFilterQueryListener) VisitTerminal(node antlr.TerminalNode) {}

// VisitErrorNode is called when an error node is visited.
func (s *BaseFilterQueryListener) VisitErrorNode(node antlr.ErrorNode) {}

// EnterEveryRule is called when any rule is entered.
func (s *BaseFilterQueryListener) EnterEveryRule(ctx antlr.ParserRuleContext) {}

// ExitEveryRule is called when any rule is exited.
func (s *BaseFilterQueryListener) ExitEveryRule(ctx antlr.ParserRuleContext) {}

// EnterQuery is called when production query is entered.
func (s *BaseFilterQueryListener) EnterQuery(ctx *QueryContext) {}

// ExitQuery is called when production query is exited.
func (s *BaseFilterQueryListener) ExitQuery(ctx *QueryContext) {}

// EnterExpression is called when production expression is entered.
func (s *BaseFilterQueryListener) EnterExpression(ctx *ExpressionContext) {}

// ExitExpression is called when production expression is exited.
func (s *BaseFilterQueryListener) ExitExpression(ctx *ExpressionContext) {}

// EnterOrExpression is called when production orExpression is entered.
func (s *BaseFilterQueryListener) EnterOrExpression(ctx *OrExpressionContext) {}

// ExitOrExpression is called when production orExpression is exited.
func (s *BaseFilterQueryListener) ExitOrExpression(ctx *OrExpressionContext) {}

// EnterAndExpression is called when production andExpression is entered.
func (s *BaseFilterQueryListener) EnterAndExpression(ctx *AndExpressionContext) {}

// ExitAndExpression is called when production andExpression is exited.
func (s *BaseFilterQueryListener) ExitAndExpression(ctx *AndExpressionContext) {}

// EnterUnaryExpression is called when production unaryExpression is entered.
func (s *BaseFilterQueryListener) EnterUnaryExpression(ctx *UnaryExpressionContext) {}

// ExitUnaryExpression is called when production unaryExpression is exited.
func (s *BaseFilterQueryListener) ExitUnaryExpression(ctx *UnaryExpressionContext) {}

// EnterPrimary is called when production primary is entered.
func (s *BaseFilterQueryListener) EnterPrimary(ctx *PrimaryContext) {}

// ExitPrimary is called when production primary is exited.
func (s *BaseFilterQueryListener) ExitPrimary(ctx *PrimaryContext) {}

// EnterComparison is called when production comparison is entered.
func (s *BaseFilterQueryListener) EnterComparison(ctx *ComparisonContext) {}

// ExitComparison is called when production comparison is exited.
func (s *BaseFilterQueryListener) ExitComparison(ctx *ComparisonContext) {}

// EnterInClause is called when production inClause is entered.
func (s *BaseFilterQueryListener) EnterInClause(ctx *InClauseContext) {}

// ExitInClause is called when production inClause is exited.
func (s *BaseFilterQueryListener) ExitInClause(ctx *InClauseContext) {}

// EnterNotInClause is called when production notInClause is entered.
func (s *BaseFilterQueryListener) EnterNotInClause(ctx *NotInClauseContext) {}

// ExitNotInClause is called when production notInClause is exited.
func (s *BaseFilterQueryListener) ExitNotInClause(ctx *NotInClauseContext) {}

// EnterValueList is called when production valueList is entered.
func (s *BaseFilterQueryListener) EnterValueList(ctx *ValueListContext) {}

// ExitValueList is called when production valueList is exited.
func (s *BaseFilterQueryListener) ExitValueList(ctx *ValueListContext) {}

// EnterFullText is called when production fullText is entered.
func (s *BaseFilterQueryListener) EnterFullText(ctx *FullTextContext) {}

// ExitFullText is called when production fullText is exited.
func (s *BaseFilterQueryListener) ExitFullText(ctx *FullTextContext) {}

// EnterFunctionCall is called when production functionCall is entered.
func (s *BaseFilterQueryListener) EnterFunctionCall(ctx *FunctionCallContext) {}

// ExitFunctionCall is called when production functionCall is exited.
func (s *BaseFilterQueryListener) ExitFunctionCall(ctx *FunctionCallContext) {}

// EnterFunctionParamList is called when production functionParamList is entered.
func (s *BaseFilterQueryListener) EnterFunctionParamList(ctx *FunctionParamListContext) {}

// ExitFunctionParamList is called when production functionParamList is exited.
func (s *BaseFilterQueryListener) ExitFunctionParamList(ctx *FunctionParamListContext) {}

// EnterFunctionParam is called when production functionParam is entered.
func (s *BaseFilterQueryListener) EnterFunctionParam(ctx *FunctionParamContext) {}

// ExitFunctionParam is called when production functionParam is exited.
func (s *BaseFilterQueryListener) ExitFunctionParam(ctx *FunctionParamContext) {}

// EnterArray is called when production array is entered.
func (s *BaseFilterQueryListener) EnterArray(ctx *ArrayContext) {}

// ExitArray is called when production array is exited.
func (s *BaseFilterQueryListener) ExitArray(ctx *ArrayContext) {}

// EnterValue is called when production value is entered.
func (s *BaseFilterQueryListener) EnterValue(ctx *ValueContext) {}

// ExitValue is called when production value is exited.
func (s *BaseFilterQueryListener) ExitValue(ctx *ValueContext) {}

// EnterKey is called when production key is entered.
func (s *BaseFilterQueryListener) EnterKey(ctx *KeyContext) {}

// ExitKey is called when production key is exited.
func (s *BaseFilterQueryListener) ExitKey(ctx *KeyContext) {}
77
pkg/parser/grammar/filterquery_base_visitor.go
Normal file
@@ -0,0 +1,77 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

type BaseFilterQueryVisitor struct {
    *antlr.BaseParseTreeVisitor
}

func (v *BaseFilterQueryVisitor) VisitQuery(ctx *QueryContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitExpression(ctx *ExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitOrExpression(ctx *OrExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitAndExpression(ctx *AndExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitUnaryExpression(ctx *UnaryExpressionContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitPrimary(ctx *PrimaryContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitComparison(ctx *ComparisonContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitInClause(ctx *InClauseContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitNotInClause(ctx *NotInClauseContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitValueList(ctx *ValueListContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFullText(ctx *FullTextContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFunctionCall(ctx *FunctionCallContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFunctionParamList(ctx *FunctionParamListContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitFunctionParam(ctx *FunctionParamContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitArray(ctx *ArrayContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitValue(ctx *ValueContext) interface{} {
    return v.VisitChildren(ctx)
}

func (v *BaseFilterQueryVisitor) VisitKey(ctx *KeyContext) interface{} {
    return v.VisitChildren(ctx)
}
271
pkg/parser/grammar/filterquery_lexer.go
Normal file
@@ -0,0 +1,271 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser

import (
    "fmt"
    "github.com/antlr4-go/antlr/v4"
    "sync"
    "unicode"
)

// Suppress unused import error
var _ = fmt.Printf
var _ = sync.Once{}
var _ = unicode.IsLetter

type FilterQueryLexer struct {
    *antlr.BaseLexer
    channelNames []string
    modeNames    []string
    // TODO: EOF string
}

var FilterQueryLexerLexerStaticData struct {
    once                   sync.Once
    serializedATN          []int32
    ChannelNames           []string
    ModeNames              []string
    LiteralNames           []string
    SymbolicNames          []string
    RuleNames              []string
    PredictionContextCache *antlr.PredictionContextCache
    atn                    *antlr.ATN
    decisionToDFA          []*antlr.DFA
}

func filterquerylexerLexerInit() {
    staticData := &FilterQueryLexerLexerStaticData
    staticData.ChannelNames = []string{
        "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
    }
    staticData.ModeNames = []string{
        "DEFAULT_MODE",
    }
    staticData.LiteralNames = []string{
        "", "'('", "')'", "'['", "']'", "','", "", "'!='", "'<>'", "'<'", "'<='",
        "'>'", "'>='",
    }
    staticData.SymbolicNames = []string{
        "", "LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
        "NEQ", "LT", "LE", "GT", "GE", "LIKE", "NOT_LIKE", "ILIKE", "NOT_ILIKE",
        "BETWEEN", "EXISTS", "REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR",
        "HAS", "HASANY", "HASALL", "HASNONE", "BOOL", "NUMBER", "QUOTED_TEXT",
        "KEY", "WS", "FREETEXT",
    }
    staticData.RuleNames = []string{
        "LPAREN", "RPAREN", "LBRACK", "RBRACK", "COMMA", "EQUALS", "NOT_EQUALS",
        "NEQ", "LT", "LE", "GT", "GE", "LIKE", "NOT_LIKE", "ILIKE", "NOT_ILIKE",
        "BETWEEN", "EXISTS", "REGEXP", "CONTAINS", "IN", "NOT", "AND", "OR",
        "HAS", "HASANY", "HASALL", "HASNONE", "BOOL", "NUMBER", "QUOTED_TEXT",
        "KEY", "WS", "DIGIT", "FREETEXT",
    }
    staticData.PredictionContextCache = antlr.NewPredictionContextCache()
    staticData.serializedATN = []int32{
        4, 0, 34, 280, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
        4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
        10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
        7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
        20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
        2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
        31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 1, 0, 1, 0, 1, 1,
        1, 1, 1, 2, 1, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 5, 3, 5, 85, 8,
        5, 1, 6, 1, 6, 1, 6, 1, 7, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1,
        10, 1, 10, 1, 11, 1, 11, 1, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13,
        1, 13, 1, 13, 1, 13, 4, 13, 112, 8, 13, 11, 13, 12, 13, 113, 1, 13, 1,
        13, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15,
        1, 15, 1, 15, 1, 15, 4, 15, 131, 8, 15, 11, 15, 12, 15, 132, 1, 15, 1,
        15, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16, 1, 16,
        1, 16, 1, 16, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 1, 17, 3, 17, 155, 8,
        17, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 18, 1, 19, 1, 19, 1, 19,
        1, 19, 1, 19, 1, 19, 1, 19, 1, 19, 3, 19, 172, 8, 19, 1, 20, 1, 20, 1,
        20, 1, 21, 1, 21, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23,
        1, 23, 1, 24, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 25, 1, 25, 1, 25, 1,
        25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27,
        1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1,
        28, 1, 28, 1, 28, 1, 28, 1, 28, 3, 28, 223, 8, 28, 1, 29, 4, 29, 226, 8,
        29, 11, 29, 12, 29, 227, 1, 29, 1, 29, 4, 29, 232, 8, 29, 11, 29, 12, 29,
        233, 3, 29, 236, 8, 29, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 242, 8, 30,
        10, 30, 12, 30, 245, 9, 30, 1, 30, 1, 30, 1, 30, 1, 30, 1, 30, 5, 30, 252,
        8, 30, 10, 30, 12, 30, 255, 9, 30, 1, 30, 3, 30, 258, 8, 30, 1, 31, 1,
        31, 5, 31, 262, 8, 31, 10, 31, 12, 31, 265, 9, 31, 1, 32, 4, 32, 268, 8,
        32, 11, 32, 12, 32, 269, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 4, 34, 277,
        8, 34, 11, 34, 12, 34, 278, 0, 0, 35, 1, 1, 3, 2, 5, 3, 7, 4, 9, 5, 11,
        6, 13, 7, 15, 8, 17, 9, 19, 10, 21, 11, 23, 12, 25, 13, 27, 14, 29, 15,
        31, 16, 33, 17, 35, 18, 37, 19, 39, 20, 41, 21, 43, 22, 45, 23, 47, 24,
        49, 25, 51, 26, 53, 27, 55, 28, 57, 29, 59, 30, 61, 31, 63, 32, 65, 33,
        67, 0, 69, 34, 1, 0, 29, 2, 0, 76, 76, 108, 108, 2, 0, 73, 73, 105, 105,
        2, 0, 75, 75, 107, 107, 2, 0, 69, 69, 101, 101, 2, 0, 78, 78, 110, 110,
        2, 0, 79, 79, 111, 111, 2, 0, 84, 84, 116, 116, 2, 0, 9, 9, 32, 32, 2,
        0, 66, 66, 98, 98, 2, 0, 87, 87, 119, 119, 2, 0, 88, 88, 120, 120, 2, 0,
        83, 83, 115, 115, 2, 0, 82, 82, 114, 114, 2, 0, 71, 71, 103, 103, 2, 0,
        80, 80, 112, 112, 2, 0, 67, 67, 99, 99, 2, 0, 65, 65, 97, 97, 2, 0, 68,
        68, 100, 100, 2, 0, 72, 72, 104, 104, 2, 0, 89, 89, 121, 121, 2, 0, 85,
        85, 117, 117, 2, 0, 70, 70, 102, 102, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39,
        92, 92, 4, 0, 48, 57, 65, 90, 95, 95, 97, 122, 6, 0, 46, 46, 48, 57, 65,
        91, 93, 93, 95, 95, 97, 122, 3, 0, 9, 10, 13, 13, 32, 32, 1, 0, 48, 57,
        7, 0, 9, 10, 13, 13, 32, 34, 39, 41, 60, 62, 91, 91, 93, 93, 295, 0, 1,
        1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 5, 1, 0, 0, 0, 0, 7, 1, 0, 0, 0, 0, 9,
        1, 0, 0, 0, 0, 11, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0,
        17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 23, 1, 0, 0, 0,
        0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0,
        0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0,
        0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1,
        0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55,
        1, 0, 0, 0, 0, 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0,
        63, 1, 0, 0, 0, 0, 65, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 1, 71, 1, 0, 0, 0,
        3, 73, 1, 0, 0, 0, 5, 75, 1, 0, 0, 0, 7, 77, 1, 0, 0, 0, 9, 79, 1, 0, 0,
        0, 11, 84, 1, 0, 0, 0, 13, 86, 1, 0, 0, 0, 15, 89, 1, 0, 0, 0, 17, 92,
        1, 0, 0, 0, 19, 94, 1, 0, 0, 0, 21, 97, 1, 0, 0, 0, 23, 99, 1, 0, 0, 0,
        25, 102, 1, 0, 0, 0, 27, 107, 1, 0, 0, 0, 29, 120, 1, 0, 0, 0, 31, 126,
        1, 0, 0, 0, 33, 140, 1, 0, 0, 0, 35, 148, 1, 0, 0, 0, 37, 156, 1, 0, 0,
        0, 39, 163, 1, 0, 0, 0, 41, 173, 1, 0, 0, 0, 43, 176, 1, 0, 0, 0, 45, 180,
        1, 0, 0, 0, 47, 184, 1, 0, 0, 0, 49, 187, 1, 0, 0, 0, 51, 191, 1, 0, 0,
        0, 53, 198, 1, 0, 0, 0, 55, 205, 1, 0, 0, 0, 57, 222, 1, 0, 0, 0, 59, 225,
        1, 0, 0, 0, 61, 257, 1, 0, 0, 0, 63, 259, 1, 0, 0, 0, 65, 267, 1, 0, 0,
        0, 67, 273, 1, 0, 0, 0, 69, 276, 1, 0, 0, 0, 71, 72, 5, 40, 0, 0, 72, 2,
        1, 0, 0, 0, 73, 74, 5, 41, 0, 0, 74, 4, 1, 0, 0, 0, 75, 76, 5, 91, 0, 0,
        76, 6, 1, 0, 0, 0, 77, 78, 5, 93, 0, 0, 78, 8, 1, 0, 0, 0, 79, 80, 5, 44,
        0, 0, 80, 10, 1, 0, 0, 0, 81, 85, 5, 61, 0, 0, 82, 83, 5, 61, 0, 0, 83,
        85, 5, 61, 0, 0, 84, 81, 1, 0, 0, 0, 84, 82, 1, 0, 0, 0, 85, 12, 1, 0,
        0, 0, 86, 87, 5, 33, 0, 0, 87, 88, 5, 61, 0, 0, 88, 14, 1, 0, 0, 0, 89,
        90, 5, 60, 0, 0, 90, 91, 5, 62, 0, 0, 91, 16, 1, 0, 0, 0, 92, 93, 5, 60,
        0, 0, 93, 18, 1, 0, 0, 0, 94, 95, 5, 60, 0, 0, 95, 96, 5, 61, 0, 0, 96,
        20, 1, 0, 0, 0, 97, 98, 5, 62, 0, 0, 98, 22, 1, 0, 0, 0, 99, 100, 5, 62,
        0, 0, 100, 101, 5, 61, 0, 0, 101, 24, 1, 0, 0, 0, 102, 103, 7, 0, 0, 0,
        103, 104, 7, 1, 0, 0, 104, 105, 7, 2, 0, 0, 105, 106, 7, 3, 0, 0, 106,
        26, 1, 0, 0, 0, 107, 108, 7, 4, 0, 0, 108, 109, 7, 5, 0, 0, 109, 111, 7,
        6, 0, 0, 110, 112, 7, 7, 0, 0, 111, 110, 1, 0, 0, 0, 112, 113, 1, 0, 0,
        0, 113, 111, 1, 0, 0, 0, 113, 114, 1, 0, 0, 0, 114, 115, 1, 0, 0, 0, 115,
        116, 7, 0, 0, 0, 116, 117, 7, 1, 0, 0, 117, 118, 7, 2, 0, 0, 118, 119,
        7, 3, 0, 0, 119, 28, 1, 0, 0, 0, 120, 121, 7, 1, 0, 0, 121, 122, 7, 0,
        0, 0, 122, 123, 7, 1, 0, 0, 123, 124, 7, 2, 0, 0, 124, 125, 7, 3, 0, 0,
        125, 30, 1, 0, 0, 0, 126, 127, 7, 4, 0, 0, 127, 128, 7, 5, 0, 0, 128, 130,
        7, 6, 0, 0, 129, 131, 7, 7, 0, 0, 130, 129, 1, 0, 0, 0, 131, 132, 1, 0,
        0, 0, 132, 130, 1, 0, 0, 0, 132, 133, 1, 0, 0, 0, 133, 134, 1, 0, 0, 0,
        134, 135, 7, 1, 0, 0, 135, 136, 7, 0, 0, 0, 136, 137, 7, 1, 0, 0, 137,
        138, 7, 2, 0, 0, 138, 139, 7, 3, 0, 0, 139, 32, 1, 0, 0, 0, 140, 141, 7,
        8, 0, 0, 141, 142, 7, 3, 0, 0, 142, 143, 7, 6, 0, 0, 143, 144, 7, 9, 0,
        0, 144, 145, 7, 3, 0, 0, 145, 146, 7, 3, 0, 0, 146, 147, 7, 4, 0, 0, 147,
        34, 1, 0, 0, 0, 148, 149, 7, 3, 0, 0, 149, 150, 7, 10, 0, 0, 150, 151,
        7, 1, 0, 0, 151, 152, 7, 11, 0, 0, 152, 154, 7, 6, 0, 0, 153, 155, 7, 11,
        0, 0, 154, 153, 1, 0, 0, 0, 154, 155, 1, 0, 0, 0, 155, 36, 1, 0, 0, 0,
        156, 157, 7, 12, 0, 0, 157, 158, 7, 3, 0, 0, 158, 159, 7, 13, 0, 0, 159,
        160, 7, 3, 0, 0, 160, 161, 7, 10, 0, 0, 161, 162, 7, 14, 0, 0, 162, 38,
        1, 0, 0, 0, 163, 164, 7, 15, 0, 0, 164, 165, 7, 5, 0, 0, 165, 166, 7, 4,
        0, 0, 166, 167, 7, 6, 0, 0, 167, 168, 7, 16, 0, 0, 168, 169, 7, 1, 0, 0,
        169, 171, 7, 4, 0, 0, 170, 172, 7, 11, 0, 0, 171, 170, 1, 0, 0, 0, 171,
        172, 1, 0, 0, 0, 172, 40, 1, 0, 0, 0, 173, 174, 7, 1, 0, 0, 174, 175, 7,
        4, 0, 0, 175, 42, 1, 0, 0, 0, 176, 177, 7, 4, 0, 0, 177, 178, 7, 5, 0,
        0, 178, 179, 7, 6, 0, 0, 179, 44, 1, 0, 0, 0, 180, 181, 7, 16, 0, 0, 181,
        182, 7, 4, 0, 0, 182, 183, 7, 17, 0, 0, 183, 46, 1, 0, 0, 0, 184, 185,
        7, 5, 0, 0, 185, 186, 7, 12, 0, 0, 186, 48, 1, 0, 0, 0, 187, 188, 7, 18,
        0, 0, 188, 189, 7, 16, 0, 0, 189, 190, 7, 11, 0, 0, 190, 50, 1, 0, 0, 0,
        191, 192, 7, 18, 0, 0, 192, 193, 7, 16, 0, 0, 193, 194, 7, 11, 0, 0, 194,
        195, 7, 16, 0, 0, 195, 196, 7, 4, 0, 0, 196, 197, 7, 19, 0, 0, 197, 52,
        1, 0, 0, 0, 198, 199, 7, 18, 0, 0, 199, 200, 7, 16, 0, 0, 200, 201, 7,
        11, 0, 0, 201, 202, 7, 16, 0, 0, 202, 203, 7, 0, 0, 0, 203, 204, 7, 0,
        0, 0, 204, 54, 1, 0, 0, 0, 205, 206, 7, 18, 0, 0, 206, 207, 7, 16, 0, 0,
        207, 208, 7, 11, 0, 0, 208, 209, 7, 4, 0, 0, 209, 210, 7, 5, 0, 0, 210,
        211, 7, 4, 0, 0, 211, 212, 7, 3, 0, 0, 212, 56, 1, 0, 0, 0, 213, 214, 7,
        6, 0, 0, 214, 215, 7, 12, 0, 0, 215, 216, 7, 20, 0, 0, 216, 223, 7, 3,
        0, 0, 217, 218, 7, 21, 0, 0, 218, 219, 7, 16, 0, 0, 219, 220, 7, 0, 0,
        0, 220, 221, 7, 11, 0, 0, 221, 223, 7, 3, 0, 0, 222, 213, 1, 0, 0, 0, 222,
        217, 1, 0, 0, 0, 223, 58, 1, 0, 0, 0, 224, 226, 3, 67, 33, 0, 225, 224,
        1, 0, 0, 0, 226, 227, 1, 0, 0, 0, 227, 225, 1, 0, 0, 0, 227, 228, 1, 0,
        0, 0, 228, 235, 1, 0, 0, 0, 229, 231, 5, 46, 0, 0, 230, 232, 3, 67, 33,
        0, 231, 230, 1, 0, 0, 0, 232, 233, 1, 0, 0, 0, 233, 231, 1, 0, 0, 0, 233,
        234, 1, 0, 0, 0, 234, 236, 1, 0, 0, 0, 235, 229, 1, 0, 0, 0, 235, 236,
        1, 0, 0, 0, 236, 60, 1, 0, 0, 0, 237, 243, 5, 34, 0, 0, 238, 242, 8, 22,
        0, 0, 239, 240, 5, 92, 0, 0, 240, 242, 9, 0, 0, 0, 241, 238, 1, 0, 0, 0,
        241, 239, 1, 0, 0, 0, 242, 245, 1, 0, 0, 0, 243, 241, 1, 0, 0, 0, 243,
        244, 1, 0, 0, 0, 244, 246, 1, 0, 0, 0, 245, 243, 1, 0, 0, 0, 246, 258,
        5, 34, 0, 0, 247, 253, 5, 39, 0, 0, 248, 252, 8, 23, 0, 0, 249, 250, 5,
        92, 0, 0, 250, 252, 9, 0, 0, 0, 251, 248, 1, 0, 0, 0, 251, 249, 1, 0, 0,
        0, 252, 255, 1, 0, 0, 0, 253, 251, 1, 0, 0, 0, 253, 254, 1, 0, 0, 0, 254,
        256, 1, 0, 0, 0, 255, 253, 1, 0, 0, 0, 256, 258, 5, 39, 0, 0, 257, 237,
        1, 0, 0, 0, 257, 247, 1, 0, 0, 0, 258, 62, 1, 0, 0, 0, 259, 263, 7, 24,
        0, 0, 260, 262, 7, 25, 0, 0, 261, 260, 1, 0, 0, 0, 262, 265, 1, 0, 0, 0,
        263, 261, 1, 0, 0, 0, 263, 264, 1, 0, 0, 0, 264, 64, 1, 0, 0, 0, 265, 263,
        1, 0, 0, 0, 266, 268, 7, 26, 0, 0, 267, 266, 1, 0, 0, 0, 268, 269, 1, 0,
        0, 0, 269, 267, 1, 0, 0, 0, 269, 270, 1, 0, 0, 0, 270, 271, 1, 0, 0, 0,
        271, 272, 6, 32, 0, 0, 272, 66, 1, 0, 0, 0, 273, 274, 7, 27, 0, 0, 274,
        68, 1, 0, 0, 0, 275, 277, 8, 28, 0, 0, 276, 275, 1, 0, 0, 0, 277, 278,
        1, 0, 0, 0, 278, 276, 1, 0, 0, 0, 278, 279, 1, 0, 0, 0, 279, 70, 1, 0,
        0, 0, 18, 0, 84, 113, 132, 154, 171, 222, 227, 233, 235, 241, 243, 251,
        253, 257, 263, 269, 278, 1, 6, 0, 0,
    }
    deserializer := antlr.NewATNDeserializer(nil)
    staticData.atn = deserializer.Deserialize(staticData.serializedATN)
    atn := staticData.atn
    staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
    decisionToDFA := staticData.decisionToDFA
    for index, state := range atn.DecisionToState {
        decisionToDFA[index] = antlr.NewDFA(state, index)
    }
}

// FilterQueryLexerInit initializes any static state used to implement FilterQueryLexer. By default the
// static state used to implement the lexer is lazily initialized during the first call to
// NewFilterQueryLexer(). You can call this function if you wish to initialize the static state ahead
// of time.
func FilterQueryLexerInit() {
    staticData := &FilterQueryLexerLexerStaticData
    staticData.once.Do(filterquerylexerLexerInit)
}

// NewFilterQueryLexer produces a new lexer instance for the optional input antlr.CharStream.
func NewFilterQueryLexer(input antlr.CharStream) *FilterQueryLexer {
    FilterQueryLexerInit()
    l := new(FilterQueryLexer)
    l.BaseLexer = antlr.NewBaseLexer(input)
    staticData := &FilterQueryLexerLexerStaticData
    l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.PredictionContextCache)
    l.channelNames = staticData.ChannelNames
    l.modeNames = staticData.ModeNames
    l.RuleNames = staticData.RuleNames
    l.LiteralNames = staticData.LiteralNames
    l.SymbolicNames = staticData.SymbolicNames
    l.GrammarFileName = "FilterQuery.g4"
    // TODO: l.EOF = antlr.TokenEOF

    return l
}

// FilterQueryLexer tokens.
const (
    FilterQueryLexerLPAREN      = 1
    FilterQueryLexerRPAREN      = 2
    FilterQueryLexerLBRACK      = 3
    FilterQueryLexerRBRACK      = 4
    FilterQueryLexerCOMMA       = 5
    FilterQueryLexerEQUALS      = 6
    FilterQueryLexerNOT_EQUALS  = 7
    FilterQueryLexerNEQ         = 8
    FilterQueryLexerLT          = 9
    FilterQueryLexerLE          = 10
    FilterQueryLexerGT          = 11
    FilterQueryLexerGE          = 12
    FilterQueryLexerLIKE        = 13
    FilterQueryLexerNOT_LIKE    = 14
    FilterQueryLexerILIKE       = 15
    FilterQueryLexerNOT_ILIKE   = 16
    FilterQueryLexerBETWEEN     = 17
    FilterQueryLexerEXISTS      = 18
    FilterQueryLexerREGEXP      = 19
    FilterQueryLexerCONTAINS    = 20
    FilterQueryLexerIN          = 21
    FilterQueryLexerNOT         = 22
    FilterQueryLexerAND         = 23
    FilterQueryLexerOR          = 24
    FilterQueryLexerHAS         = 25
    FilterQueryLexerHASANY      = 26
    FilterQueryLexerHASALL      = 27
    FilterQueryLexerHASNONE     = 28
    FilterQueryLexerBOOL        = 29
    FilterQueryLexerNUMBER      = 30
    FilterQueryLexerQUOTED_TEXT = 31
    FilterQueryLexerKEY         = 32
    FilterQueryLexerWS          = 33
    FilterQueryLexerFREETEXT    = 34
)
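As a quick orientation, the generated lexer can be driven standalone with the antlr4-go runtime. The following is a minimal sketch, not part of the generated file; the import path assumes the package lives at pkg/parser/grammar under the repository's github.com/SigNoz/signoz module:

package main

import (
    "fmt"

    "github.com/antlr4-go/antlr/v4"

    parser "github.com/SigNoz/signoz/pkg/parser/grammar"
)

func main() {
    // Feed a raw filter string into the generated lexer.
    input := antlr.NewInputStream(`service.name = "frontend" AND duration_nano > 500000`)
    lexer := parser.NewFilterQueryLexer(input)

    // CommonTokenStream buffers tokens from the lexer; Fill consumes the
    // whole input. Whitespace never appears because WS is marked -> skip.
    stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
    stream.Fill()

    for _, tok := range stream.GetAllTokens() {
        if tok.GetTokenType() == antlr.TokenEOF {
            break
        }
        // SymbolicNames is populated by NewFilterQueryLexer above.
        fmt.Printf("%-12s %q\n", lexer.SymbolicNames[tok.GetTokenType()], tok.GetText())
    }
}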
112
pkg/parser/grammar/filterquery_listener.go
Normal file
@@ -0,0 +1,112 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

// FilterQueryListener is a complete listener for a parse tree produced by FilterQueryParser.
type FilterQueryListener interface {
    antlr.ParseTreeListener

    // EnterQuery is called when entering the query production.
    EnterQuery(c *QueryContext)

    // EnterExpression is called when entering the expression production.
    EnterExpression(c *ExpressionContext)

    // EnterOrExpression is called when entering the orExpression production.
    EnterOrExpression(c *OrExpressionContext)

    // EnterAndExpression is called when entering the andExpression production.
    EnterAndExpression(c *AndExpressionContext)

    // EnterUnaryExpression is called when entering the unaryExpression production.
    EnterUnaryExpression(c *UnaryExpressionContext)

    // EnterPrimary is called when entering the primary production.
    EnterPrimary(c *PrimaryContext)

    // EnterComparison is called when entering the comparison production.
    EnterComparison(c *ComparisonContext)

    // EnterInClause is called when entering the inClause production.
    EnterInClause(c *InClauseContext)

    // EnterNotInClause is called when entering the notInClause production.
    EnterNotInClause(c *NotInClauseContext)

    // EnterValueList is called when entering the valueList production.
    EnterValueList(c *ValueListContext)

    // EnterFullText is called when entering the fullText production.
    EnterFullText(c *FullTextContext)

    // EnterFunctionCall is called when entering the functionCall production.
    EnterFunctionCall(c *FunctionCallContext)

    // EnterFunctionParamList is called when entering the functionParamList production.
    EnterFunctionParamList(c *FunctionParamListContext)

    // EnterFunctionParam is called when entering the functionParam production.
    EnterFunctionParam(c *FunctionParamContext)

    // EnterArray is called when entering the array production.
    EnterArray(c *ArrayContext)

    // EnterValue is called when entering the value production.
    EnterValue(c *ValueContext)

    // EnterKey is called when entering the key production.
    EnterKey(c *KeyContext)

    // ExitQuery is called when exiting the query production.
    ExitQuery(c *QueryContext)

    // ExitExpression is called when exiting the expression production.
    ExitExpression(c *ExpressionContext)

    // ExitOrExpression is called when exiting the orExpression production.
    ExitOrExpression(c *OrExpressionContext)

    // ExitAndExpression is called when exiting the andExpression production.
    ExitAndExpression(c *AndExpressionContext)

    // ExitUnaryExpression is called when exiting the unaryExpression production.
    ExitUnaryExpression(c *UnaryExpressionContext)

    // ExitPrimary is called when exiting the primary production.
    ExitPrimary(c *PrimaryContext)

    // ExitComparison is called when exiting the comparison production.
    ExitComparison(c *ComparisonContext)

    // ExitInClause is called when exiting the inClause production.
    ExitInClause(c *InClauseContext)

    // ExitNotInClause is called when exiting the notInClause production.
    ExitNotInClause(c *NotInClauseContext)

    // ExitValueList is called when exiting the valueList production.
    ExitValueList(c *ValueListContext)

    // ExitFullText is called when exiting the fullText production.
    ExitFullText(c *FullTextContext)

    // ExitFunctionCall is called when exiting the functionCall production.
    ExitFunctionCall(c *FunctionCallContext)

    // ExitFunctionParamList is called when exiting the functionParamList production.
    ExitFunctionParamList(c *FunctionParamListContext)

    // ExitFunctionParam is called when exiting the functionParam production.
    ExitFunctionParam(c *FunctionParamContext)

    // ExitArray is called when exiting the array production.
    ExitArray(c *ArrayContext)

    // ExitValue is called when exiting the value production.
    ExitValue(c *ValueContext)

    // ExitKey is called when exiting the key production.
    ExitKey(c *KeyContext)
}
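A consumer typically embeds BaseFilterQueryListener and overrides only the callbacks it needs. A minimal sketch follows; the parser construction assumes the standard ANTLR-generated NewFilterQueryParser constructor and Query() entry rule from filterquery_parser.go, whose diff is suppressed below:

// comparisonCounter counts key-operator-value filters in a query.
type comparisonCounter struct {
    parser.BaseFilterQueryListener
    n int
}

// EnterComparison fires once for every comparison production in the tree.
func (c *comparisonCounter) EnterComparison(ctx *parser.ComparisonContext) {
    c.n++
}

func countComparisons(q string) int {
    lexer := parser.NewFilterQueryLexer(antlr.NewInputStream(q))
    tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
    p := parser.NewFilterQueryParser(tokens)

    // Walk the parse tree depth-first, firing Enter/Exit callbacks.
    counter := &comparisonCounter{}
    antlr.ParseTreeWalkerDefault.Walk(counter, p.Query())
    return counter.n
}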
3539
pkg/parser/grammar/filterquery_parser.go
Normal file
File diff suppressed because it is too large
61
pkg/parser/grammar/filterquery_visitor.go
Normal file
@@ -0,0 +1,61 @@
// Code generated from grammar/FilterQuery.g4 by ANTLR 4.13.2. DO NOT EDIT.

package parser // FilterQuery

import "github.com/antlr4-go/antlr/v4"

// A complete Visitor for a parse tree produced by FilterQueryParser.
type FilterQueryVisitor interface {
    antlr.ParseTreeVisitor

    // Visit a parse tree produced by FilterQueryParser#query.
    VisitQuery(ctx *QueryContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#expression.
    VisitExpression(ctx *ExpressionContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#orExpression.
    VisitOrExpression(ctx *OrExpressionContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#andExpression.
    VisitAndExpression(ctx *AndExpressionContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#unaryExpression.
    VisitUnaryExpression(ctx *UnaryExpressionContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#primary.
    VisitPrimary(ctx *PrimaryContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#comparison.
    VisitComparison(ctx *ComparisonContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#inClause.
    VisitInClause(ctx *InClauseContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#notInClause.
    VisitNotInClause(ctx *NotInClauseContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#valueList.
    VisitValueList(ctx *ValueListContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#fullText.
    VisitFullText(ctx *FullTextContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#functionCall.
    VisitFunctionCall(ctx *FunctionCallContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#functionParamList.
    VisitFunctionParamList(ctx *FunctionParamListContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#functionParam.
    VisitFunctionParam(ctx *FunctionParamContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#array.
    VisitArray(ctx *ArrayContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#value.
    VisitValue(ctx *ValueContext) interface{}

    // Visit a parse tree produced by FilterQueryParser#key.
    VisitKey(ctx *KeyContext) interface{}
}
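One caveat on the visitor side: in the Go target, the embedded BaseParseTreeVisitor's VisitChildren is a stub that returns nil rather than recursing, so an implementation has to drive the traversal itself (or use the listener pattern above for read-only scans). A minimal sketch collecting key names, under that assumption:

// keyCollector gathers every key referenced in a filter expression.
type keyCollector struct {
    parser.BaseFilterQueryVisitor
    keys []string
}

func (v *keyCollector) VisitKey(ctx *parser.KeyContext) interface{} {
    v.keys = append(v.keys, ctx.GetText())
    return nil
}

// walk dispatches key nodes through Accept and recurses into everything
// else, since the embedded base visitor does not recurse on its own.
func walk(node antlr.Tree, v *keyCollector) {
    if key, ok := node.(*parser.KeyContext); ok {
        key.Accept(v)
        return
    }
    for _, child := range node.GetChildren() {
        walk(child, v)
    }
}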
22
pkg/query-service/Dockerfile.multi-arch
Normal file
@@ -0,0 +1,22 @@
ARG ALPINE_SHA="pass-a-valid-docker-sha-otherwise-this-will-fail"

FROM alpine@sha256:${ALPINE_SHA}
LABEL maintainer="signoz"
WORKDIR /root

ARG OS="linux"
ARG ARCH

RUN apk update && \
    apk add ca-certificates && \
    rm -rf /var/cache/apk/*

COPY ./target/${OS}-${ARCH}/signoz-community /root/signoz-community
COPY ./conf/prometheus.yml /root/config/prometheus.yml
COPY ./templates/email /root/templates
COPY frontend/build/ /etc/signoz/web/

RUN chmod 755 /root /root/signoz-community

ENTRYPOINT ["./signoz-community"]
CMD ["-config", "/root/config/prometheus.yml"]
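For reference, a local build against this Dockerfile would look roughly like the following (an illustrative invocation, not a documented command from this repository; the digest and tag are placeholders):

docker build \
  -f pkg/query-service/Dockerfile.multi-arch \
  --build-arg ALPINE_SHA=<alpine-image-digest> \
  --build-arg ARCH=amd64 \
  -t signoz-community:local .

ALPINE_SHA must be the sha256 digest of a published alpine image, since the FROM line pins alpine@sha256:${ALPINE_SHA}, and ARCH has to match a prebuilt binary at ./target/linux-<arch>/signoz-community.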
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/telemetrystore"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/mailru/easyjson"
|
||||
"github.com/uptrace/bun"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -40,6 +41,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/logs"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/resource"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/services"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/traces/smart"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/app/traces/tracedetail"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/common"
|
||||
"github.com/SigNoz/signoz/pkg/query-service/constants"
|
||||
@@ -6791,3 +6793,262 @@ func (r *ClickHouseReader) GetUpdatedMetricsMetadata(ctx context.Context, metric
|
||||
|
||||
return cachedMetadata, nil
|
||||
}
|
||||
|
||||
func (r *ClickHouseReader) SearchTracesV2(ctx context.Context, params *model.SearchTracesParams) (*[]model.SearchSpansResult, error) {
|
||||
searchSpansResult := []model.SearchSpansResult{
|
||||
{
|
||||
Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError", "StatusMessage", "StatusCodeString", "SpanKind"},
|
||||
IsSubTree: false,
|
||||
Events: make([][]interface{}, 0),
|
||||
},
|
||||
}
|
||||
|
||||
var traceSummary model.TraceSummary
|
||||
summaryQuery := fmt.Sprintf("SELECT * from %s.%s WHERE trace_id=$1", r.TraceDB, r.traceSummaryTable)
|
||||
err := r.db.QueryRow(ctx, summaryQuery, params.TraceID).Scan(&traceSummary.TraceID, &traceSummary.Start, &traceSummary.End, &traceSummary.NumSpans)
|
||||
if err != nil {
|
||||
if err == sql.ErrNoRows {
|
||||
return &searchSpansResult, nil
|
||||
}
|
||||
zap.L().Error("Error in processing sql query", zap.Error(err))
|
||||
return nil, fmt.Errorf("error in processing sql query")
|
||||
}
|
||||
|
||||
if traceSummary.NumSpans > uint64(params.MaxSpansInTrace) {
|
||||
zap.L().Error("Max spans allowed in a trace limit reached", zap.Int("MaxSpansInTrace", params.MaxSpansInTrace),
|
||||
zap.Uint64("Count", traceSummary.NumSpans))
|
||||
claims, ok := authtypes.ClaimsFromContext(ctx)
|
||||
if ok {
|
||||
data := map[string]interface{}{
|
||||
"traceSize": traceSummary.NumSpans,
|
||||
"maxSpansInTraceLimit": params.MaxSpansInTrace,
|
||||
"algo": "smart",
|
||||
}
|
||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_MAX_SPANS_ALLOWED_LIMIT_REACHED, data, claims.Email, true, false)
|
||||
}
|
||||
return nil, fmt.Errorf("max spans allowed in trace limit reached, please contact support for more details")
|
||||
}
|
||||
|
||||
claims, ok := authtypes.ClaimsFromContext(ctx)
|
||||
if ok {
|
||||
data := map[string]interface{}{
|
||||
"traceSize": traceSummary.NumSpans,
|
||||
"algo": "smart",
|
||||
}
|
||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_TRACE_DETAIL_API, data, claims.Email, true, false)
|
||||
}
|
||||
|
||||
var startTime, endTime, durationNano uint64
|
||||
var searchScanResponses []model.SpanItemV2
|
||||
|
||||
query := fmt.Sprintf("SELECT timestamp, duration_nano, span_id, trace_id, has_error, kind, resource_string_service$$name, name, references, attributes_string, attributes_number, attributes_bool, resources_string, events, status_message, status_code_string, kind_string FROM %s.%s WHERE trace_id=$1 and ts_bucket_start>=$2 and ts_bucket_start<=$3", r.TraceDB, r.traceTableName)
|
||||
|
||||
start := time.Now()
|
||||
|
||||
err = r.db.Select(ctx, &searchScanResponses, query, params.TraceID, strconv.FormatInt(traceSummary.Start.Unix()-1800, 10), strconv.FormatInt(traceSummary.End.Unix(), 10))
|
||||
|
||||
zap.L().Info(query)
|
||||
|
||||
if err != nil {
|
||||
zap.L().Error("Error in processing sql query", zap.Error(err))
|
||||
return nil, fmt.Errorf("error in processing sql query")
|
||||
}
|
||||
end := time.Now()
|
||||
zap.L().Debug("getTraceSQLQuery took: ", zap.Duration("duration", end.Sub(start)))
|
||||
|
||||
searchSpansResult[0].Events = make([][]interface{}, len(searchScanResponses))
|
||||
|
||||
searchSpanResponses := []model.SearchSpanResponseItem{}
|
||||
start = time.Now()
|
||||
for _, item := range searchScanResponses {
|
||||
ref := []model.OtelSpanRef{}
|
||||
err := json.Unmarshal([]byte(item.References), &ref)
|
||||
if err != nil {
|
||||
zap.L().Error("Error unmarshalling references", zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// merge attributes_number and attributes_bool to attributes_string
|
||||
for k, v := range item.Attributes_bool {
|
||||
item.Attributes_string[k] = fmt.Sprintf("%v", v)
|
||||
}
|
||||
for k, v := range item.Attributes_number {
|
||||
item.Attributes_string[k] = fmt.Sprintf("%v", v)
|
||||
}
|
||||
for k, v := range item.Resources_string {
|
||||
item.Attributes_string[k] = v
|
||||
}
|
||||
|
||||
jsonItem := model.SearchSpanResponseItem{
|
||||
SpanID: item.SpanID,
|
||||
TraceID: item.TraceID,
|
||||
ServiceName: item.ServiceName,
|
||||
Name: item.Name,
|
||||
Kind: int32(item.Kind),
|
||||
DurationNano: int64(item.DurationNano),
|
||||
HasError: item.HasError,
|
||||
StatusMessage: item.StatusMessage,
|
||||
StatusCodeString: item.StatusCodeString,
|
||||
SpanKind: item.SpanKind,
|
||||
References: ref,
|
||||
Events: item.Events,
|
||||
TagMap: item.Attributes_string,
|
||||
}
|
||||
|
||||
jsonItem.TimeUnixNano = uint64(item.TimeUnixNano.UnixNano() / 1000000)
|
||||
|
||||
searchSpanResponses = append(searchSpanResponses, jsonItem)
|
||||
if startTime == 0 || jsonItem.TimeUnixNano < startTime {
|
||||
startTime = jsonItem.TimeUnixNano
|
||||
}
|
||||
if endTime == 0 || jsonItem.TimeUnixNano > endTime {
|
||||
endTime = jsonItem.TimeUnixNano
|
||||
}
|
||||
if durationNano == 0 || uint64(jsonItem.DurationNano) > durationNano {
|
||||
durationNano = uint64(jsonItem.DurationNano)
|
||||
}
|
||||
}
|
||||
end = time.Now()
|
||||
zap.L().Debug("getTraceSQLQuery unmarshal took: ", zap.Duration("duration", end.Sub(start)))
|
||||
|
||||
if len(searchScanResponses) > params.SpansRenderLimit {
|
||||
start = time.Now()
|
||||
searchSpansResult, err = smart.SmartTraceAlgorithm(searchSpanResponses, params.SpanID, params.LevelUp, params.LevelDown, params.SpansRenderLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
end = time.Now()
|
||||
zap.L().Debug("smartTraceAlgo took: ", zap.Duration("duration", end.Sub(start)))
|
||||
claims, ok := authtypes.ClaimsFromContext(ctx)
|
||||
if ok {
|
||||
data := map[string]interface{}{
|
||||
"traceSize": len(searchScanResponses),
|
||||
"spansRenderLimit": params.SpansRenderLimit,
|
||||
"algo": "smart",
|
||||
}
|
||||
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LARGE_TRACE_OPENED, data, claims.Email, true, false)
|
||||
}
|
||||
} else {
|
||||
for i, item := range searchSpanResponses {
|
||||
spanEvents := item.GetValues()
|
||||
searchSpansResult[0].Events[i] = spanEvents
|
||||
}
|
||||
}
|
||||
|
||||
searchSpansResult[0].StartTimestampMillis = startTime - (durationNano / 1000000)
|
||||
searchSpansResult[0].EndTimestampMillis = endTime + (durationNano / 1000000)
|
||||
|
||||
return &searchSpansResult, nil
|
||||
}
|
||||
|
func (r *ClickHouseReader) SearchTraces(ctx context.Context, params *model.SearchTracesParams) (*[]model.SearchSpansResult, error) {

	if r.useTraceNewSchema {
		return r.SearchTracesV2(ctx, params)
	}

	var countSpans uint64
	countQuery := fmt.Sprintf("SELECT count() as count from %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)
	err := r.db.QueryRow(ctx, countQuery, params.TraceID).Scan(&countSpans)
	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query")
	}

	if countSpans > uint64(params.MaxSpansInTrace) {
		zap.L().Error("Max spans allowed in a trace limit reached", zap.Int("MaxSpansInTrace", params.MaxSpansInTrace),
			zap.Uint64("Count", countSpans))
		claims, ok := authtypes.ClaimsFromContext(ctx)
		if ok {
			data := map[string]interface{}{
				"traceSize":            countSpans,
				"maxSpansInTraceLimit": params.MaxSpansInTrace,
				"algo":                 "smart",
			}
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_MAX_SPANS_ALLOWED_LIMIT_REACHED, data, claims.Email, true, false)
		}
		return nil, fmt.Errorf("max spans allowed in trace limit reached, please contact support for more details")
	}

	claims, ok := authtypes.ClaimsFromContext(ctx)
	if ok {
		data := map[string]interface{}{
			"traceSize": countSpans,
			"algo":      "smart",
		}
		telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_TRACE_DETAIL_API, data, claims.Email, true, false)
	}

	var startTime, endTime, durationNano uint64
	var searchScanResponses []model.SearchSpanDBResponseItem

	query := fmt.Sprintf("SELECT timestamp, traceID, model FROM %s.%s WHERE traceID=$1", r.TraceDB, r.SpansTable)

	start := time.Now()

	err = r.db.Select(ctx, &searchScanResponses, query, params.TraceID)

	zap.L().Info(query)

	if err != nil {
		zap.L().Error("Error in processing sql query", zap.Error(err))
		return nil, fmt.Errorf("error in processing sql query")
	}
	end := time.Now()
	zap.L().Debug("getTraceSQLQuery took: ", zap.Duration("duration", end.Sub(start)))
	searchSpansResult := []model.SearchSpansResult{{
		Columns:   []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References", "Events", "HasError", "StatusMessage", "StatusCodeString", "SpanKind"},
		Events:    make([][]interface{}, len(searchScanResponses)),
		IsSubTree: false,
	},
	}

	searchSpanResponses := []model.SearchSpanResponseItem{}
	start = time.Now()
	for _, item := range searchScanResponses {
		var jsonItem model.SearchSpanResponseItem
		easyjson.Unmarshal([]byte(item.Model), &jsonItem)
		jsonItem.TimeUnixNano = uint64(item.Timestamp.UnixNano() / 1000000)
		searchSpanResponses = append(searchSpanResponses, jsonItem)
		if startTime == 0 || jsonItem.TimeUnixNano < startTime {
			startTime = jsonItem.TimeUnixNano
		}
		if endTime == 0 || jsonItem.TimeUnixNano > endTime {
			endTime = jsonItem.TimeUnixNano
		}
		if durationNano == 0 || uint64(jsonItem.DurationNano) > durationNano {
			durationNano = uint64(jsonItem.DurationNano)
		}
	}
	end = time.Now()
	zap.L().Debug("getTraceSQLQuery unmarshal took: ", zap.Duration("duration", end.Sub(start)))

	if len(searchScanResponses) > params.SpansRenderLimit {
		start = time.Now()
		searchSpansResult, err = smart.SmartTraceAlgorithm(searchSpanResponses, params.SpanID, params.LevelUp, params.LevelDown, params.SpansRenderLimit)
		if err != nil {
			return nil, err
		}
		end = time.Now()
		zap.L().Debug("smartTraceAlgo took: ", zap.Duration("duration", end.Sub(start)))
		claims, ok := authtypes.ClaimsFromContext(ctx)
		if ok {
			data := map[string]interface{}{
				"traceSize":        len(searchScanResponses),
				"spansRenderLimit": params.SpansRenderLimit,
				"algo":             "smart",
			}
			telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_LARGE_TRACE_OPENED, data, claims.Email, true, false)
		}
	} else {
		for i, item := range searchSpanResponses {
			spanEvents := item.GetValues()
			searchSpansResult[0].Events[i] = spanEvents
		}
	}

	searchSpansResult[0].StartTimestampMillis = startTime - (durationNano / 1000000)
	searchSpansResult[0].EndTimestampMillis = endTime + (durationNano / 1000000)

	return &searchSpansResult, nil
}
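// Hedged usage sketch (hypothetical caller, not part of this repository):
// SearchTraces dispatches to SearchTracesV2 when the new trace schema is in
// use; otherwise it counts spans first and refuses traces larger than
// params.MaxSpansInTrace, and falls back to the SmartTraceAlgorithm subtree
// when the span count exceeds params.SpansRenderLimit. The limit values
// below are illustrative, not defaults from this codebase.
func fetchTraceSketch(ctx context.Context, reader *ClickHouseReader, traceID string) (*[]model.SearchSpansResult, error) {
	params := &model.SearchTracesParams{
		TraceID:          traceID,
		MaxSpansInTrace:  250000, // guardrail against unrenderably large traces
		SpansRenderLimit: 2500,   // above this, SmartTraceAlgorithm trims the tree
	}
	return reader.SearchTraces(ctx, params)
}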
@@ -8,68 +8,61 @@ import (
	"time"

	"github.com/SigNoz/signoz/pkg/query-service/model"
-	"github.com/google/uuid"
-	"github.com/jmoiron/sqlx"
+	"github.com/SigNoz/signoz/pkg/sqlstore"
+	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/valuer"
)

type cloudProviderAccountsRepository interface {
-	listConnected(ctx context.Context, cloudProvider string) ([]AccountRecord, *model.ApiError)
+	listConnected(ctx context.Context, orgId string, provider string) ([]types.CloudIntegration, *model.ApiError)

-	get(ctx context.Context, cloudProvider string, id string) (*AccountRecord, *model.ApiError)
+	get(ctx context.Context, orgId string, provider string, id string) (*types.CloudIntegration, *model.ApiError)

-	getConnectedCloudAccount(
-		ctx context.Context, cloudProvider string, cloudAccountId string,
-	) (*AccountRecord, *model.ApiError)
+	// getActiveAccount(ctx context.Context, orgId string, provider string, accountId string) (*types.CloudIntegration, *model.ApiError)
+
+	getConnectedCloudAccount(ctx context.Context, orgId string, provider string, accountID string) (*types.CloudIntegration, *model.ApiError)

	// Insert an account or update it by (cloudProvider, id)
	// for specified non-empty fields
	upsert(
		ctx context.Context,
-		cloudProvider string,
+		orgId string,
+		provider string,
		id *string,
-		config *AccountConfig,
-		cloudAccountId *string,
-		agentReport *AgentReport,
+		config *types.AccountConfig,
+		accountId *string,
+		agentReport *types.AgentReport,
		removedAt *time.Time,
-	) (*AccountRecord, *model.ApiError)
+	) (*types.CloudIntegration, *model.ApiError)
}

-func newCloudProviderAccountsRepository(db *sqlx.DB) (
+func newCloudProviderAccountsRepository(store sqlstore.SQLStore) (
	*cloudProviderAccountsSQLRepository, error,
) {
	return &cloudProviderAccountsSQLRepository{
-		db: db,
+		store: store,
	}, nil
}

type cloudProviderAccountsSQLRepository struct {
-	db *sqlx.DB
+	store sqlstore.SQLStore
}

func (r *cloudProviderAccountsSQLRepository) listConnected(
-	ctx context.Context, cloudProvider string,
-) ([]AccountRecord, *model.ApiError) {
-	accounts := []AccountRecord{}
+	ctx context.Context, orgId string, cloudProvider string,
+) ([]types.CloudIntegration, *model.ApiError) {
+	accounts := []types.CloudIntegration{}

+	err := r.store.BunDB().NewSelect().
+		Model(&accounts).
+		Where("org_id = ?", orgId).
+		Where("provider = ?", cloudProvider).
+		Where("removed_at is NULL").
+		Where("account_id is not NULL").
+		Where("last_agent_report is not NULL").
+		Order("created_at").
+		Scan(ctx)
-	err := r.db.SelectContext(
-		ctx, &accounts, `
-			select
-				cloud_provider,
-				id,
-				config_json,
-				cloud_account_id,
-				last_agent_report_json,
-				created_at,
-				removed_at
-			from cloud_integrations_accounts
-			where
-				cloud_provider=$1
-				and removed_at is NULL
-				and cloud_account_id is not NULL
-				and last_agent_report_json is not NULL
-			order by created_at
-		`, cloudProvider,
-	)
	if err != nil {
		return nil, model.InternalError(fmt.Errorf(
			"could not query connected cloud accounts: %w", err,
@@ -80,27 +73,16 @@ func (r *cloudProviderAccountsSQLRepository) listConnected(
}

func (r *cloudProviderAccountsSQLRepository) get(
-	ctx context.Context, cloudProvider string, id string,
-) (*AccountRecord, *model.ApiError) {
-	var result AccountRecord
+	ctx context.Context, orgId string, provider string, id string,
+) (*types.CloudIntegration, *model.ApiError) {
+	var result types.CloudIntegration

-	err := r.db.GetContext(
-		ctx, &result, `
-			select
-				cloud_provider,
-				id,
-				config_json,
-				cloud_account_id,
-				last_agent_report_json,
-				created_at,
-				removed_at
-			from cloud_integrations_accounts
-			where
-				cloud_provider=$1
-				and id=$2
-		`,
-		cloudProvider, id,
-	)
+	err := r.store.BunDB().NewSelect().
+		Model(&result).
+		Where("org_id = ?", orgId).
+		Where("provider = ?", provider).
+		Where("id = ?", id).
+		Scan(ctx)

	if err == sql.ErrNoRows {
		return nil, model.NotFoundError(fmt.Errorf(
@@ -115,34 +97,50 @@ func (r *cloudProviderAccountsSQLRepository) get(
	return &result, nil
}

-func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
-	ctx context.Context, cloudProvider string, cloudAccountId string,
-) (*AccountRecord, *model.ApiError) {
-	var result AccountRecord
+// get the latest active account for the given accountID
+// func (r *cloudProviderAccountsSQLRepository) getActiveAccount(
+// 	ctx context.Context, orgId string, provider string, accountId string,
+// ) (*types.CloudIntegration, *model.ApiError) {
+// 	var result types.CloudIntegration

-	err := r.db.GetContext(
-		ctx, &result, `
-			select
-				cloud_provider,
-				id,
-				config_json,
-				cloud_account_id,
-				last_agent_report_json,
-				created_at,
-				removed_at
-			from cloud_integrations_accounts
-			where
-				cloud_provider=$1
-				and cloud_account_id=$2
-				and last_agent_report_json is not NULL
-				and removed_at is NULL
-		`,
-		cloudProvider, cloudAccountId,
-	)
+// 	err := r.store.BunDB().NewSelect().
+// 		Model(&result).
+// 		Where("org_id = ?", orgId).
+// 		Where("provider = ?", provider).
+// 		Where("account_id = ?", accountId).
+// 		Where("removed_at is NULL").
+// 		Scan(ctx)
+
+// 	if err == sql.ErrNoRows {
+// 		return nil, model.NotFoundError(fmt.Errorf(
+// 			"couldn't find connected cloud account %s", accountId,
+// 		))
+// 	} else if err != nil {
+// 		return nil, model.InternalError(fmt.Errorf(
+// 			"couldn't query cloud provider accounts: %w", err,
+// 		))
+// 	}
+
+// 	return &result, nil
+// }
+
+func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
+	ctx context.Context, orgId string, provider string, accountId string,
+) (*types.CloudIntegration, *model.ApiError) {
+	var result types.CloudIntegration
+
+	err := r.store.BunDB().NewSelect().
+		Model(&result).
+		Where("org_id = ?", orgId).
+		Where("provider = ?", provider).
+		Where("account_id = ?", accountId).
+		Where("last_agent_report is not NULL").
+		Where("removed_at is NULL").
+		Scan(ctx)

	if err == sql.ErrNoRows {
		return nil, model.NotFoundError(fmt.Errorf(
-			"couldn't find connected cloud account %s", cloudAccountId,
+			"couldn't find connected cloud account %s", accountId,
		))
	} else if err != nil {
		return nil, model.InternalError(fmt.Errorf(
@@ -155,17 +153,18 @@ func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(

func (r *cloudProviderAccountsSQLRepository) upsert(
	ctx context.Context,
-	cloudProvider string,
+	orgId string,
+	provider string,
	id *string,
-	config *AccountConfig,
-	cloudAccountId *string,
-	agentReport *AgentReport,
+	config *types.AccountConfig,
+	accountId *string,
+	agentReport *types.AgentReport,
	removedAt *time.Time,
-) (*AccountRecord, *model.ApiError) {
+) (*types.CloudIntegration, *model.ApiError) {
	// Insert
	if id == nil {
-		newId := uuid.NewString()
-		id = &newId
+		temp := valuer.GenerateUUID().StringValue()
+		id = &temp
	}

	// Prepare clause for setting values in `on conflict do update`
@@ -176,19 +175,19 @@ func (r *cloudProviderAccountsSQLRepository) upsert(

	if config != nil {
		onConflictSetStmts = append(
-			onConflictSetStmts, setColStatement("config_json"),
+			onConflictSetStmts, setColStatement("config"),
		)
	}

-	if cloudAccountId != nil {
+	if accountId != nil {
		onConflictSetStmts = append(
-			onConflictSetStmts, setColStatement("cloud_account_id"),
+			onConflictSetStmts, setColStatement("account_id"),
		)
	}

	if agentReport != nil {
		onConflictSetStmts = append(
-			onConflictSetStmts, setColStatement("last_agent_report_json"),
+			onConflictSetStmts, setColStatement("last_agent_report"),
		)
	}

@@ -198,37 +197,45 @@ func (r *cloudProviderAccountsSQLRepository) upsert(
		)
	}

+	// set updated_at to current timestamp if it's an upsert
+	onConflictSetStmts = append(
+		onConflictSetStmts, setColStatement("updated_at"),
+	)
+
	onConflictClause := ""
	if len(onConflictSetStmts) > 0 {
		onConflictClause = fmt.Sprintf(
-			"on conflict(cloud_provider, id) do update SET\n%s",
+			"conflict(id, provider, org_id) do update SET\n%s",
			strings.Join(onConflictSetStmts, ",\n"),
		)
	}

-	insertQuery := fmt.Sprintf(`
-		INSERT INTO cloud_integrations_accounts (
-			cloud_provider,
-			id,
-			config_json,
-			cloud_account_id,
-			last_agent_report_json,
-			removed_at
-		) values ($1, $2, $3, $4, $5, $6)
-		%s`, onConflictClause,
-	)
+	integration := types.CloudIntegration{
+		OrgID:        orgId,
+		Provider:     provider,
+		Identifiable: types.Identifiable{ID: valuer.MustNewUUID(*id)},
+		TimeAuditable: types.TimeAuditable{
+			CreatedAt: time.Now(),
+			UpdatedAt: time.Now(),
+		},
+		Config:          config,
+		AccountID:       accountId,
+		LastAgentReport: agentReport,
+		RemovedAt:       removedAt,
+	}

+	_, dbErr := r.store.BunDB().NewInsert().
+		Model(&integration).
+		On(onConflictClause).
+		Exec(ctx)
-	_, dbErr := r.db.ExecContext(
-		ctx, insertQuery,
-		cloudProvider, id, config, cloudAccountId, agentReport, removedAt,
-	)
	if dbErr != nil {
		return nil, model.InternalError(fmt.Errorf(
			"could not upsert cloud account record: %w", dbErr,
		))
	}

-	upsertedAccount, apiErr := r.get(ctx, cloudProvider, *id)
+	upsertedAccount, apiErr := r.get(ctx, orgId, provider, *id)
	if apiErr != nil {
		return nil, model.InternalError(fmt.Errorf(
			"couldn't fetch upserted account by id: %w", apiErr.ToError(),
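The rewritten repository leans on bun's conditional upsert: the "on conflict ... do update SET" list is built only from the fields the caller actually supplied, so an agent check-in can refresh last_agent_report without clobbering config. A minimal sketch of that idiom, assuming the bun v1 API used above (the helper name and wiring are illustrative, not a verbatim copy):

func upsertSketch(ctx context.Context, db *bun.DB, integration *types.CloudIntegration, setStmts []string) error {
	// Only the columns the caller supplied make it into the conflict clause.
	onConflict := fmt.Sprintf(
		"conflict(id, provider, org_id) do update SET\n%s",
		strings.Join(setStmts, ",\n"),
	)
	_, err := db.NewInsert().
		Model(integration).
		On(onConflict).
		Exec(ctx)
	return err
}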
@@ -33,12 +33,12 @@ type Controller struct {
func NewController(sqlStore sqlstore.SQLStore) (
	*Controller, error,
) {
-	accountsRepo, err := newCloudProviderAccountsRepository(sqlStore.SQLxDB())
+	accountsRepo, err := newCloudProviderAccountsRepository(sqlStore)
	if err != nil {
		return nil, fmt.Errorf("couldn't create cloud provider accounts repo: %w", err)
	}

-	serviceConfigRepo, err := newServiceConfigRepository(sqlStore.SQLxDB())
+	serviceConfigRepo, err := newServiceConfigRepository(sqlStore)
	if err != nil {
		return nil, fmt.Errorf("couldn't create cloud provider service config repo: %w", err)
	}
@@ -49,19 +49,12 @@ func NewController(sqlStore sqlstore.SQLStore) (
	}, nil
}

-type Account struct {
-	Id             string        `json:"id"`
-	CloudAccountId string        `json:"cloud_account_id"`
-	Config         AccountConfig `json:"config"`
-	Status         AccountStatus `json:"status"`
-}
-
type ConnectedAccountsListResponse struct {
-	Accounts []Account `json:"accounts"`
+	Accounts []types.Account `json:"accounts"`
}

func (c *Controller) ListConnectedAccounts(
-	ctx context.Context, cloudProvider string,
+	ctx context.Context, orgId string, cloudProvider string,
) (
	*ConnectedAccountsListResponse, *model.ApiError,
) {
@@ -69,14 +62,14 @@ func (c *Controller) ListConnectedAccounts(
		return nil, apiErr
	}

-	accountRecords, apiErr := c.accountsRepo.listConnected(ctx, cloudProvider)
+	accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgId, cloudProvider)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't list cloud accounts")
	}

-	connectedAccounts := []Account{}
+	connectedAccounts := []types.Account{}
	for _, a := range accountRecords {
-		connectedAccounts = append(connectedAccounts, a.account())
+		connectedAccounts = append(connectedAccounts, a.Account())
	}

	return &ConnectedAccountsListResponse{
@@ -88,7 +81,7 @@ type GenerateConnectionUrlRequest struct {
	// Optional. To be specified for updates.
	AccountId *string `json:"account_id,omitempty"`

-	AccountConfig AccountConfig `json:"account_config"`
+	AccountConfig types.AccountConfig `json:"account_config"`

	AgentConfig SigNozAgentConfig `json:"agent_config"`
}
@@ -109,7 +102,7 @@ type GenerateConnectionUrlResponse struct {
}

func (c *Controller) GenerateConnectionUrl(
-	ctx context.Context, cloudProvider string, req GenerateConnectionUrlRequest,
+	ctx context.Context, orgId string, cloudProvider string, req GenerateConnectionUrlRequest,
) (*GenerateConnectionUrlResponse, *model.ApiError) {
	// Account connection with a simple connection URL may not be available for all providers.
	if cloudProvider != "aws" {
@@ -117,7 +110,7 @@ func (c *Controller) GenerateConnectionUrl(
	}

	account, apiErr := c.accountsRepo.upsert(
-		ctx, cloudProvider, req.AccountId, &req.AccountConfig, nil, nil, nil,
+		ctx, orgId, cloudProvider, req.AccountId, &req.AccountConfig, nil, nil, nil,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
@@ -135,7 +128,7 @@ func (c *Controller) GenerateConnectionUrl(
		"param_SigNozIntegrationAgentVersion": agentVersion,
		"param_SigNozApiUrl":                  req.AgentConfig.SigNozAPIUrl,
		"param_SigNozApiKey":                  req.AgentConfig.SigNozAPIKey,
-		"param_SigNozAccountId":               account.Id,
+		"param_SigNozAccountId":               account.ID.StringValue(),
		"param_IngestionUrl":                  req.AgentConfig.IngestionUrl,
		"param_IngestionKey":                  req.AgentConfig.IngestionKey,
		"stackName":                           "signoz-integration",
@@ -148,19 +141,19 @@ func (c *Controller) GenerateConnectionUrl(
	}

	return &GenerateConnectionUrlResponse{
-		AccountId:     account.Id,
+		AccountId:     account.ID.StringValue(),
		ConnectionUrl: connectionUrl,
	}, nil
}

type AccountStatusResponse struct {
-	Id             string        `json:"id"`
-	CloudAccountId *string       `json:"cloud_account_id,omitempty"`
-	Status         AccountStatus `json:"status"`
+	Id             string              `json:"id"`
+	CloudAccountId *string             `json:"cloud_account_id,omitempty"`
+	Status         types.AccountStatus `json:"status"`
}

func (c *Controller) GetAccountStatus(
-	ctx context.Context, cloudProvider string, accountId string,
+	ctx context.Context, orgId string, cloudProvider string, accountId string,
) (
	*AccountStatusResponse, *model.ApiError,
) {
@@ -168,23 +161,23 @@ func (c *Controller) GetAccountStatus(
		return nil, apiErr
	}

-	account, apiErr := c.accountsRepo.get(ctx, cloudProvider, accountId)
+	account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
	if apiErr != nil {
		return nil, apiErr
	}

	resp := AccountStatusResponse{
-		Id:             account.Id,
-		CloudAccountId: account.CloudAccountId,
-		Status:         account.status(),
+		Id:             account.ID.StringValue(),
+		CloudAccountId: account.AccountID,
+		Status:         account.Status(),
	}

	return &resp, nil
}

type AgentCheckInRequest struct {
-	AccountId      string `json:"account_id"`
-	CloudAccountId string `json:"cloud_account_id"`
+	ID        string `json:"account_id"`
+	AccountID string `json:"cloud_account_id"`
	// Arbitrary cloud specific Agent data
	Data map[string]any `json:"data,omitempty"`
}
@@ -204,35 +197,35 @@ type IntegrationConfigForAgent struct {
}

func (c *Controller) CheckInAsAgent(
-	ctx context.Context, cloudProvider string, req AgentCheckInRequest,
+	ctx context.Context, orgId string, cloudProvider string, req AgentCheckInRequest,
) (*AgentCheckInResponse, *model.ApiError) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

-	existingAccount, apiErr := c.accountsRepo.get(ctx, cloudProvider, req.AccountId)
-	if existingAccount != nil && existingAccount.CloudAccountId != nil && *existingAccount.CloudAccountId != req.CloudAccountId {
+	existingAccount, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, req.ID)
+	if existingAccount != nil && existingAccount.AccountID != nil && *existingAccount.AccountID != req.AccountID {
		return nil, model.BadRequest(fmt.Errorf(
			"can't check in with new %s account id %s for account %s with existing %s id %s",
-			cloudProvider, req.CloudAccountId, existingAccount.Id, cloudProvider, *existingAccount.CloudAccountId,
+			cloudProvider, req.AccountID, existingAccount.ID.StringValue(), cloudProvider, *existingAccount.AccountID,
		))
	}

-	existingAccount, apiErr = c.accountsRepo.getConnectedCloudAccount(ctx, cloudProvider, req.CloudAccountId)
-	if existingAccount != nil && existingAccount.Id != req.AccountId {
+	existingAccount, apiErr = c.accountsRepo.getConnectedCloudAccount(ctx, orgId, cloudProvider, req.AccountID)
+	if existingAccount != nil && existingAccount.ID.StringValue() != req.ID {
		return nil, model.BadRequest(fmt.Errorf(
			"can't check in to %s account %s with id %s. already connected with id %s",
-			cloudProvider, req.CloudAccountId, req.AccountId, existingAccount.Id,
+			cloudProvider, req.AccountID, req.ID, existingAccount.ID.StringValue(),
		))
	}

-	agentReport := AgentReport{
+	agentReport := types.AgentReport{
		TimestampMillis: time.Now().UnixMilli(),
		Data:            req.Data,
	}

	account, apiErr := c.accountsRepo.upsert(
-		ctx, cloudProvider, &req.AccountId, nil, &req.CloudAccountId, &agentReport, nil,
+		ctx, orgId, cloudProvider, &req.ID, nil, &req.AccountID, &agentReport, nil,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
@@ -265,7 +258,7 @@ func (c *Controller) CheckInAsAgent(
	}

	svcConfigs, apiErr := c.serviceConfigRepo.getAllForAccount(
-		ctx, cloudProvider, *account.CloudAccountId,
+		ctx, orgId, account.ID.StringValue(),
	)
	if apiErr != nil {
		return nil, model.WrapApiError(
@@ -298,54 +291,55 @@ func (c *Controller) CheckInAsAgent(
	}

	return &AgentCheckInResponse{
-		AccountId:         account.Id,
-		CloudAccountId:    *account.CloudAccountId,
+		AccountId:         account.ID.StringValue(),
+		CloudAccountId:    *account.AccountID,
		RemovedAt:         account.RemovedAt,
		IntegrationConfig: agentConfig,
	}, nil
}

type UpdateAccountConfigRequest struct {
-	Config AccountConfig `json:"config"`
+	Config types.AccountConfig `json:"config"`
}

func (c *Controller) UpdateAccountConfig(
	ctx context.Context,
+	orgId string,
	cloudProvider string,
	accountId string,
	req UpdateAccountConfigRequest,
-) (*Account, *model.ApiError) {
+) (*types.Account, *model.ApiError) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

	accountRecord, apiErr := c.accountsRepo.upsert(
-		ctx, cloudProvider, &accountId, &req.Config, nil, nil, nil,
+		ctx, orgId, cloudProvider, &accountId, &req.Config, nil, nil, nil,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't upsert cloud account")
	}

-	account := accountRecord.account()
+	account := accountRecord.Account()

	return &account, nil
}

func (c *Controller) DisconnectAccount(
-	ctx context.Context, cloudProvider string, accountId string,
-) (*AccountRecord, *model.ApiError) {
+	ctx context.Context, orgId string, cloudProvider string, accountId string,
) (*types.CloudIntegration, *model.ApiError) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}

-	account, apiErr := c.accountsRepo.get(ctx, cloudProvider, accountId)
+	account, apiErr := c.accountsRepo.get(ctx, orgId, cloudProvider, accountId)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
	}

	tsNow := time.Now()
	account, apiErr = c.accountsRepo.upsert(
-		ctx, cloudProvider, &accountId, nil, nil, nil, &tsNow,
+		ctx, orgId, cloudProvider, &accountId, nil, nil, nil, &tsNow,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't disconnect account")
@@ -360,6 +354,7 @@ type ListServicesResponse struct {

func (c *Controller) ListServices(
	ctx context.Context,
+	orgID string,
	cloudProvider string,
	cloudAccountId *string,
) (*ListServicesResponse, *model.ApiError) {
@@ -373,10 +368,16 @@ func (c *Controller) ListServices(
		return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
	}

-	svcConfigs := map[string]*CloudServiceConfig{}
+	svcConfigs := map[string]*types.CloudServiceConfig{}
	if cloudAccountId != nil {
+		activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
+			ctx, orgID, cloudProvider, *cloudAccountId,
+		)
+		if apiErr != nil {
+			return nil, model.WrapApiError(apiErr, "couldn't get active account")
+		}
		svcConfigs, apiErr = c.serviceConfigRepo.getAllForAccount(
-			ctx, cloudProvider, *cloudAccountId,
+			ctx, orgID, activeAccount.ID.StringValue(),
		)
		if apiErr != nil {
			return nil, model.WrapApiError(
@@ -400,6 +401,7 @@ func (c *Controller) ListServices(

func (c *Controller) GetServiceDetails(
	ctx context.Context,
+	orgID string,
	cloudProvider string,
	serviceId string,
	cloudAccountId *string,
@@ -415,8 +417,16 @@ func (c *Controller) GetServiceDetails(
	}

	if cloudAccountId != nil {

+		activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
+			ctx, orgID, cloudProvider, *cloudAccountId,
+		)
+		if apiErr != nil {
+			return nil, model.WrapApiError(apiErr, "couldn't get active account")
+		}

		config, apiErr := c.serviceConfigRepo.get(
-			ctx, cloudProvider, *cloudAccountId, serviceId,
+			ctx, orgID, activeAccount.ID.StringValue(), serviceId,
		)
		if apiErr != nil && apiErr.Type() != model.ErrorNotFound {
			return nil, model.WrapApiError(apiErr, "couldn't fetch service config")
@@ -443,17 +453,18 @@ func (c *Controller) GetServiceDetails(
}

type UpdateServiceConfigRequest struct {
-	CloudAccountId string             `json:"cloud_account_id"`
-	Config         CloudServiceConfig `json:"config"`
+	CloudAccountId string                   `json:"cloud_account_id"`
+	Config         types.CloudServiceConfig `json:"config"`
}

type UpdateServiceConfigResponse struct {
-	Id     string             `json:"id"`
-	Config CloudServiceConfig `json:"config"`
+	Id     string                   `json:"id"`
+	Config types.CloudServiceConfig `json:"config"`
}

func (c *Controller) UpdateServiceConfig(
	ctx context.Context,
+	orgID string,
	cloudProvider string,
	serviceId string,
	req UpdateServiceConfigRequest,
@@ -465,7 +476,7 @@ func (c *Controller) UpdateServiceConfig(

	// can only update config for a connected cloud account id
	_, apiErr := c.accountsRepo.getConnectedCloudAccount(
-		ctx, cloudProvider, req.CloudAccountId,
+		ctx, orgID, cloudProvider, req.CloudAccountId,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't find connected cloud account")
@@ -478,7 +489,7 @@ func (c *Controller) UpdateServiceConfig(
	}

	updatedConfig, apiErr := c.serviceConfigRepo.upsert(
-		ctx, cloudProvider, req.CloudAccountId, serviceId, req.Config,
+		ctx, orgID, cloudProvider, req.CloudAccountId, serviceId, req.Config,
	)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't update service config")
@@ -492,13 +503,13 @@ func (c *Controller) UpdateServiceConfig(

// All dashboards that are available based on cloud integrations configuration
// across all cloud providers
-func (c *Controller) AvailableDashboards(ctx context.Context) (
+func (c *Controller) AvailableDashboards(ctx context.Context, orgId string) (
	[]types.Dashboard, *model.ApiError,
) {
	allDashboards := []types.Dashboard{}

	for _, provider := range []string{"aws"} {
-		providerDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, provider)
+		providerDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, provider)
		if apiErr != nil {
			return nil, model.WrapApiError(
				apiErr, fmt.Sprintf("couldn't get available dashboards for %s", provider),
@@ -512,10 +523,10 @@ func (c *Controller) AvailableDashboards(ctx context.Context) (
}

func (c *Controller) AvailableDashboardsForCloudProvider(
-	ctx context.Context, cloudProvider string,
+	ctx context.Context, orgID string, cloudProvider string,
) ([]types.Dashboard, *model.ApiError) {

-	accountRecords, apiErr := c.accountsRepo.listConnected(ctx, cloudProvider)
+	accountRecords, apiErr := c.accountsRepo.listConnected(ctx, orgID, cloudProvider)
	if apiErr != nil {
		return nil, model.WrapApiError(apiErr, "couldn't list connected cloud accounts")
	}
@@ -524,9 +535,9 @@ func (c *Controller) AvailableDashboardsForCloudProvider(
	servicesWithAvailableMetrics := map[string]*time.Time{}

	for _, ar := range accountRecords {
-		if ar.CloudAccountId != nil {
+		if ar.AccountID != nil {
			configsBySvcId, apiErr := c.serviceConfigRepo.getAllForAccount(
-				ctx, cloudProvider, *ar.CloudAccountId,
+				ctx, orgID, ar.ID.StringValue(),
			)
			if apiErr != nil {
				return nil, apiErr
@@ -574,6 +585,7 @@ func (c *Controller) AvailableDashboardsForCloudProvider(
}
func (c *Controller) GetDashboardById(
	ctx context.Context,
+	orgId string,
	dashboardUuid string,
) (*types.Dashboard, *model.ApiError) {
	cloudProvider, _, _, apiErr := c.parseDashboardUuid(dashboardUuid)
@@ -581,7 +593,7 @@ func (c *Controller) GetDashboardById(
		return nil, apiErr
	}

-	allDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, cloudProvider)
+	allDashboards, apiErr := c.AvailableDashboardsForCloudProvider(ctx, orgId, cloudProvider)
	if apiErr != nil {
		return nil, model.WrapApiError(
			apiErr, fmt.Sprintf("couldn't list available dashboards"),
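A hedged end-to-end sketch of the agent check-in flow after the org-scoping change (hypothetical caller; the id values are placeholders): every controller entry point now takes the caller's orgId, so two orgs can each connect the same cloud account id independently.

func agentCheckInExample(ctx context.Context, c *Controller, orgID string) error {
	resp, apiErr := c.CheckInAsAgent(ctx, orgID, "aws", AgentCheckInRequest{
		ID:        "0195a3c2-placeholder-uuid", // SigNoz-side integration id
		AccountID: "123456789012",              // AWS account id reported by the agent
	})
	if apiErr != nil {
		return apiErr.ToError()
	}
	fmt.Println("connected cloud account:", resp.CloudAccountId)
	return nil
}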
@@ -4,23 +4,30 @@ import (
	"context"
	"testing"

+	"github.com/SigNoz/signoz/pkg/query-service/auth"
+	"github.com/SigNoz/signoz/pkg/query-service/constants"
+	"github.com/SigNoz/signoz/pkg/query-service/dao"
	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/query-service/utils"
+	"github.com/SigNoz/signoz/pkg/types"
	"github.com/google/uuid"
	"github.com/stretchr/testify/require"
)

func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {
	require := require.New(t)
-	sqlStore, _ := utils.NewTestSqliteDB(t)
+	sqlStore := utils.NewQueryServiceDBForTests(t)
	controller, err := NewController(sqlStore)
	require.NoError(err)

+	user, apiErr := createTestUser()
+	require.Nil(apiErr)
+
	// should be able to generate connection url for
	// same account id again with updated config
-	testAccountConfig1 := AccountConfig{EnabledRegions: []string{"us-east-1", "us-west-1"}}
+	testAccountConfig1 := types.AccountConfig{EnabledRegions: []string{"us-east-1", "us-west-1"}}
	resp1, apiErr := controller.GenerateConnectionUrl(
-		context.TODO(), "aws", GenerateConnectionUrlRequest{
+		context.TODO(), user.OrgID, "aws", GenerateConnectionUrlRequest{
			AccountConfig: testAccountConfig1,
			AgentConfig:   SigNozAgentConfig{Region: "us-east-2"},
		},
@@ -31,14 +38,14 @@ func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {

	testAccountId := resp1.AccountId
	account, apiErr := controller.accountsRepo.get(
-		context.TODO(), "aws", testAccountId,
+		context.TODO(), user.OrgID, "aws", testAccountId,
	)
	require.Nil(apiErr)
	require.Equal(testAccountConfig1, *account.Config)

-	testAccountConfig2 := AccountConfig{EnabledRegions: []string{"us-east-2", "us-west-2"}}
+	testAccountConfig2 := types.AccountConfig{EnabledRegions: []string{"us-east-2", "us-west-2"}}
	resp2, apiErr := controller.GenerateConnectionUrl(
-		context.TODO(), "aws", GenerateConnectionUrlRequest{
+		context.TODO(), user.OrgID, "aws", GenerateConnectionUrlRequest{
			AccountId:     &testAccountId,
			AccountConfig: testAccountConfig2,
			AgentConfig:   SigNozAgentConfig{Region: "us-east-2"},
@@ -48,7 +55,7 @@ func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {
	require.Equal(testAccountId, resp2.AccountId)

	account, apiErr = controller.accountsRepo.get(
-		context.TODO(), "aws", testAccountId,
+		context.TODO(), user.OrgID, "aws", testAccountId,
	)
	require.Nil(apiErr)
	require.Equal(testAccountConfig2, *account.Config)
@@ -56,18 +63,21 @@ func TestRegenerateConnectionUrlWithUpdatedConfig(t *testing.T) {

func TestAgentCheckIns(t *testing.T) {
	require := require.New(t)
-	sqlStore, _ := utils.NewTestSqliteDB(t)
+	sqlStore := utils.NewQueryServiceDBForTests(t)
	controller, err := NewController(sqlStore)
	require.NoError(err)

+	user, apiErr := createTestUser()
+	require.Nil(apiErr)
+
	// An agent should be able to check in from a cloud account even
	// if no connection url was requested (no account with agent's account id exists)
	testAccountId1 := uuid.NewString()
	testCloudAccountId1 := "546311234"
	resp1, apiErr := controller.CheckInAsAgent(
-		context.TODO(), "aws", AgentCheckInRequest{
-			AccountId:      testAccountId1,
-			CloudAccountId: testCloudAccountId1,
+		context.TODO(), user.OrgID, "aws", AgentCheckInRequest{
+			ID:        testAccountId1,
+			AccountID: testCloudAccountId1,
		},
	)
	require.Nil(apiErr)
@@ -78,9 +88,9 @@ func TestAgentCheckIns(t *testing.T) {
	// cloud account id for the same account.
	testCloudAccountId2 := "99999999"
	_, apiErr = controller.CheckInAsAgent(
-		context.TODO(), "aws", AgentCheckInRequest{
-			AccountId:      testAccountId1,
-			CloudAccountId: testCloudAccountId2,
+		context.TODO(), user.OrgID, "aws", AgentCheckInRequest{
+			ID:        testAccountId1,
+			AccountID: testCloudAccountId2,
		},
	)
	require.NotNil(apiErr)
@@ -90,18 +100,18 @@ func TestAgentCheckIns(t *testing.T) {
	// i.e. there can't be 2 connected account records for the same cloud account id
	// at any point in time.
	existingConnected, apiErr := controller.accountsRepo.getConnectedCloudAccount(
-		context.TODO(), "aws", testCloudAccountId1,
+		context.TODO(), user.OrgID, "aws", testCloudAccountId1,
	)
	require.Nil(apiErr)
	require.NotNil(existingConnected)
-	require.Equal(testCloudAccountId1, *existingConnected.CloudAccountId)
+	require.Equal(testCloudAccountId1, *existingConnected.AccountID)
	require.Nil(existingConnected.RemovedAt)

	testAccountId2 := uuid.NewString()
	_, apiErr = controller.CheckInAsAgent(
-		context.TODO(), "aws", AgentCheckInRequest{
-			AccountId:      testAccountId2,
-			CloudAccountId: testCloudAccountId1,
+		context.TODO(), user.OrgID, "aws", AgentCheckInRequest{
+			ID:        testAccountId2,
+			AccountID: testCloudAccountId1,
		},
	)
	require.NotNil(apiErr)
@@ -109,29 +119,29 @@ func TestAgentCheckIns(t *testing.T) {
	// After disconnecting existing account record, the agent should be able to
	// connected for a particular cloud account id
	_, apiErr = controller.DisconnectAccount(
-		context.TODO(), "aws", testAccountId1,
+		context.TODO(), user.OrgID, "aws", testAccountId1,
	)

	existingConnected, apiErr = controller.accountsRepo.getConnectedCloudAccount(
-		context.TODO(), "aws", testCloudAccountId1,
+		context.TODO(), user.OrgID, "aws", testCloudAccountId1,
	)
	require.Nil(existingConnected)
	require.NotNil(apiErr)
	require.Equal(model.ErrorNotFound, apiErr.Type())

	_, apiErr = controller.CheckInAsAgent(
-		context.TODO(), "aws", AgentCheckInRequest{
-			AccountId:      testAccountId2,
-			CloudAccountId: testCloudAccountId1,
+		context.TODO(), user.OrgID, "aws", AgentCheckInRequest{
+			ID:        testAccountId2,
+			AccountID: testCloudAccountId1,
		},
	)
	require.Nil(apiErr)

	// should be able to keep checking in
	_, apiErr = controller.CheckInAsAgent(
-		context.TODO(), "aws", AgentCheckInRequest{
-			AccountId:      testAccountId2,
-			CloudAccountId: testCloudAccountId1,
+		context.TODO(), user.OrgID, "aws", AgentCheckInRequest{
+			ID:        testAccountId2,
+			AccountID: testCloudAccountId1,
		},
	)
	require.Nil(apiErr)
@@ -139,13 +149,16 @@ func TestAgentCheckIns(t *testing.T) {

func TestCantDisconnectNonExistentAccount(t *testing.T) {
	require := require.New(t)
-	sqlStore, _ := utils.NewTestSqliteDB(t)
+	sqlStore := utils.NewQueryServiceDBForTests(t)
	controller, err := NewController(sqlStore)
	require.NoError(err)

+	user, apiErr := createTestUser()
+	require.Nil(apiErr)
+
	// Attempting to disconnect a non-existent account should return error
	account, apiErr := controller.DisconnectAccount(
-		context.TODO(), "aws", uuid.NewString(),
+		context.TODO(), user.OrgID, "aws", uuid.NewString(),
	)
	require.NotNil(apiErr)
	require.Equal(model.ErrorNotFound, apiErr.Type())
@@ -154,15 +167,23 @@ func TestCantDisconnectNonExistentAccount(t *testing.T) {

func TestConfigureService(t *testing.T) {
	require := require.New(t)
-	sqlStore, _ := utils.NewTestSqliteDB(t)
+	sqlStore := utils.NewQueryServiceDBForTests(t)
	controller, err := NewController(sqlStore)
	require.NoError(err)

+	user, apiErr := createTestUser()
+	require.Nil(apiErr)
+
+	// create a connected account
	testCloudAccountId := "546311234"
+	testConnectedAccount := makeTestConnectedAccount(t, user.OrgID, controller, testCloudAccountId)
+	require.Nil(testConnectedAccount.RemovedAt)
+	require.NotEmpty(testConnectedAccount.AccountID)
+	require.Equal(testCloudAccountId, *testConnectedAccount.AccountID)

	// should start out without any service config
	svcListResp, apiErr := controller.ListServices(
-		context.TODO(), "aws", &testCloudAccountId,
+		context.TODO(), user.OrgID, "aws", &testCloudAccountId,
	)
	require.Nil(apiErr)

@@ -170,25 +191,20 @@ func TestConfigureService(t *testing.T) {
	require.Nil(svcListResp.Services[0].Config)

	svcDetails, apiErr := controller.GetServiceDetails(
-		context.TODO(), "aws", testSvcId, &testCloudAccountId,
+		context.TODO(), user.OrgID, "aws", testSvcId, &testCloudAccountId,
	)
	require.Nil(apiErr)
	require.Equal(testSvcId, svcDetails.Id)
	require.Nil(svcDetails.Config)

	// should be able to configure a service for a connected account
-	testConnectedAccount := makeTestConnectedAccount(t, controller, testCloudAccountId)
-	require.Nil(testConnectedAccount.RemovedAt)
-	require.NotNil(testConnectedAccount.CloudAccountId)
-	require.Equal(testCloudAccountId, *testConnectedAccount.CloudAccountId)
-
-	testSvcConfig := CloudServiceConfig{
-		Metrics: &CloudServiceMetricsConfig{
+	testSvcConfig := types.CloudServiceConfig{
		Metrics: &types.CloudServiceMetricsConfig{
			Enabled: true,
		},
	}
	updateSvcConfigResp, apiErr := controller.UpdateServiceConfig(
-		context.TODO(), "aws", testSvcId, UpdateServiceConfigRequest{
+		context.TODO(), user.OrgID, "aws", testSvcId, UpdateServiceConfigRequest{
			CloudAccountId: testCloudAccountId,
			Config:         testSvcConfig,
		},
@@ -198,14 +214,14 @@ func TestConfigureService(t *testing.T) {
	require.Equal(testSvcConfig, updateSvcConfigResp.Config)

	svcDetails, apiErr = controller.GetServiceDetails(
-		context.TODO(), "aws", testSvcId, &testCloudAccountId,
+		context.TODO(), user.OrgID, "aws", testSvcId, &testCloudAccountId,
	)
	require.Nil(apiErr)
	require.Equal(testSvcId, svcDetails.Id)
	require.Equal(testSvcConfig, *svcDetails.Config)

	svcListResp, apiErr = controller.ListServices(
-		context.TODO(), "aws", &testCloudAccountId,
+		context.TODO(), user.OrgID, "aws", &testCloudAccountId,
	)
	require.Nil(apiErr)
	for _, svc := range svcListResp.Services {
@@ -216,12 +232,12 @@ func TestConfigureService(t *testing.T) {

	// should not be able to configure service after cloud account has been disconnected
	_, apiErr = controller.DisconnectAccount(
-		context.TODO(), "aws", testConnectedAccount.Id,
+		context.TODO(), user.OrgID, "aws", testConnectedAccount.ID.StringValue(),
	)
	require.Nil(apiErr)

	_, apiErr = controller.UpdateServiceConfig(
-		context.TODO(), "aws", testSvcId,
+		context.TODO(), user.OrgID, "aws", testSvcId,
		UpdateServiceConfigRequest{
			CloudAccountId: testCloudAccountId,
			Config:         testSvcConfig,
@@ -231,7 +247,7 @@ func TestConfigureService(t *testing.T) {

	// should not be able to configure a service for a cloud account id that is not connected yet
	_, apiErr = controller.UpdateServiceConfig(
-		context.TODO(), "aws", testSvcId,
+		context.TODO(), user.OrgID, "aws", testSvcId,
		UpdateServiceConfigRequest{
			CloudAccountId: "9999999999",
			Config:         testSvcConfig,
@@ -241,7 +257,7 @@ func TestConfigureService(t *testing.T) {

	// should not be able to set config for an unsupported service
	_, apiErr = controller.UpdateServiceConfig(
-		context.TODO(), "aws", "bad-service", UpdateServiceConfigRequest{
+		context.TODO(), user.OrgID, "aws", "bad-service", UpdateServiceConfigRequest{
			CloudAccountId: testCloudAccountId,
			Config:         testSvcConfig,
		},
@@ -250,22 +266,54 @@ func TestConfigureService(t *testing.T) {

}

-func makeTestConnectedAccount(t *testing.T, controller *Controller, cloudAccountId string) *AccountRecord {
+func makeTestConnectedAccount(t *testing.T, orgId string, controller *Controller, cloudAccountId string) *types.CloudIntegration {
	require := require.New(t)

	// a check in from SigNoz agent creates or updates a connected account.
	testAccountId := uuid.NewString()
	resp, apiErr := controller.CheckInAsAgent(
-		context.TODO(), "aws", AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: cloudAccountId,
+		context.TODO(), orgId, "aws", AgentCheckInRequest{
+			ID:        testAccountId,
+			AccountID: cloudAccountId,
		},
	)
	require.Nil(apiErr)
	require.Equal(testAccountId, resp.AccountId)
	require.Equal(cloudAccountId, resp.CloudAccountId)

-	acc, err := controller.accountsRepo.get(context.TODO(), "aws", resp.AccountId)
+	acc, err := controller.accountsRepo.get(context.TODO(), orgId, "aws", resp.AccountId)
	require.Nil(err)
	return acc
}

+func createTestUser() (*types.User, *model.ApiError) {
+	// Create a test user for auth
+	ctx := context.Background()
+	org, apiErr := dao.DB().CreateOrg(ctx, &types.Organization{
+		Name: "test",
+	})
+	if apiErr != nil {
+		return nil, apiErr
+	}
+
+	group, apiErr := dao.DB().GetGroupByName(ctx, constants.AdminGroup)
+	if apiErr != nil {
+		return nil, apiErr
+	}
+
+	auth.InitAuthCache(ctx)
+
+	userId := uuid.NewString()
+	return dao.DB().CreateUser(
+		ctx,
+		&types.User{
+			ID:       userId,
+			Name:     "test",
+			Email:    userId[:8] + "test@test.com",
+			Password: "test",
+			OrgID:    org.ID,
+			GroupID:  group.ID,
+		},
+		true,
+	)
+}
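A sketch (hypothetical, not in this diff) of the org-isolation check that the new orgId parameter makes possible: since createTestUser appears to create a fresh org per call, the same cloud account id can be connected by two different orgs without conflict.

func TestOrgIsolationSketch(t *testing.T) {
	require := require.New(t)
	sqlStore := utils.NewQueryServiceDBForTests(t)
	controller, err := NewController(sqlStore)
	require.NoError(err)

	userA, apiErr := createTestUser()
	require.Nil(apiErr)
	userB, apiErr := createTestUser()
	require.Nil(apiErr)

	// Both orgs connect the same AWS account id independently.
	accA := makeTestConnectedAccount(t, userA.OrgID, controller, "546311234")
	accB := makeTestConnectedAccount(t, userB.OrgID, controller, "546311234")
	require.NotEqual(accA.OrgID, accB.OrgID)
}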
@@ -1,123 +1,11 @@
package cloudintegrations

import (
-	"database/sql/driver"
-	"encoding/json"
-	"fmt"
-	"time"

+	"github.com/SigNoz/signoz/pkg/types"
)

-// Represents a cloud provider account for cloud integrations
-type AccountRecord struct {
-	CloudProvider   string         `json:"cloud_provider" db:"cloud_provider"`
-	Id              string         `json:"id" db:"id"`
-	Config          *AccountConfig `json:"config" db:"config_json"`
-	CloudAccountId  *string        `json:"cloud_account_id" db:"cloud_account_id"`
-	LastAgentReport *AgentReport   `json:"last_agent_report" db:"last_agent_report_json"`
-	CreatedAt       time.Time      `json:"created_at" db:"created_at"`
-	RemovedAt       *time.Time     `json:"removed_at" db:"removed_at"`
-}
-
-type AccountConfig struct {
-	EnabledRegions []string `json:"regions"`
-}
-
-func DefaultAccountConfig() AccountConfig {
-	return AccountConfig{
-		EnabledRegions: []string{},
-	}
-}
-
-// For serializing from db
-func (c *AccountConfig) Scan(src any) error {
-	data, ok := src.([]byte)
-	if !ok {
-		return fmt.Errorf("tried to scan from %T instead of bytes", src)
-	}
-
-	return json.Unmarshal(data, &c)
-}
-
-// For serializing to db
-func (c *AccountConfig) Value() (driver.Value, error) {
-	if c == nil {
-		return nil, nil
-	}
-
-	serialized, err := json.Marshal(c)
-	if err != nil {
-		return nil, fmt.Errorf(
-			"couldn't serialize cloud account config to JSON: %w", err,
-		)
-	}
-	return serialized, nil
-}
-
-type AgentReport struct {
-	TimestampMillis int64          `json:"timestamp_millis"`
-	Data            map[string]any `json:"data"`
-}
-
-// For serializing from db
-func (r *AgentReport) Scan(src any) error {
-	data, ok := src.([]byte)
-	if !ok {
-		return fmt.Errorf("tried to scan from %T instead of bytes", src)
-	}
-
-	return json.Unmarshal(data, &r)
-}
-
-// For serializing to db
-func (r *AgentReport) Value() (driver.Value, error) {
-	if r == nil {
-		return nil, nil
-	}
-
-	serialized, err := json.Marshal(r)
-	if err != nil {
-		return nil, fmt.Errorf(
-			"couldn't serialize agent report to JSON: %w", err,
-		)
-	}
-	return serialized, nil
-}
-
-type AccountStatus struct {
-	Integration AccountIntegrationStatus `json:"integration"`
-}
-
-type AccountIntegrationStatus struct {
-	LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
-}
-
-func (a *AccountRecord) status() AccountStatus {
-	status := AccountStatus{}
-	if a.LastAgentReport != nil {
-		lastHeartbeat := a.LastAgentReport.TimestampMillis
-		status.Integration.LastHeartbeatTsMillis = &lastHeartbeat
-	}
-	return status
-}
-
-func (a *AccountRecord) account() Account {
-	ca := Account{Id: a.Id, Status: a.status()}
-
-	if a.CloudAccountId != nil {
-		ca.CloudAccountId = *a.CloudAccountId
-	}
-
-	if a.Config != nil {
-		ca.Config = *a.Config
-	} else {
-		ca.Config = DefaultAccountConfig()
-	}
-
-	return ca
-}
-
type CloudServiceSummary struct {
	Id    string `json:"id"`
	Title string `json:"title"`
@@ -125,7 +13,7 @@ type CloudServiceSummary struct {

	// Present only if the service has been configured in the
	// context of a cloud provider account.
-	Config *CloudServiceConfig `json:"config,omitempty"`
+	Config *types.CloudServiceConfig `json:"config,omitempty"`
}

type CloudServiceDetails struct {
@@ -144,44 +32,6 @@ type CloudServiceDetails struct {
	TelemetryCollectionStrategy *CloudTelemetryCollectionStrategy `json:"telemetry_collection_strategy"`
}

-type CloudServiceConfig struct {
-	Logs    *CloudServiceLogsConfig    `json:"logs,omitempty"`
-	Metrics *CloudServiceMetricsConfig `json:"metrics,omitempty"`
-}
-
-// For serializing from db
-func (c *CloudServiceConfig) Scan(src any) error {
-	data, ok := src.([]byte)
-	if !ok {
-		return fmt.Errorf("tried to scan from %T instead of bytes", src)
-	}
-
-	return json.Unmarshal(data, &c)
-}
-
-// For serializing to db
-func (c *CloudServiceConfig) Value() (driver.Value, error) {
-	if c == nil {
-		return nil, nil
-	}
-
-	serialized, err := json.Marshal(c)
-	if err != nil {
-		return nil, fmt.Errorf(
-			"couldn't serialize cloud service config to JSON: %w", err,
-		)
-	}
-	return serialized, nil
-}
-
-type CloudServiceLogsConfig struct {
-	Enabled bool `json:"enabled"`
-}
-
-type CloudServiceMetricsConfig struct {
-	Enabled bool `json:"enabled"`
-}
-
type CloudServiceAssets struct {
	Dashboards []CloudServiceDashboard `json:"dashboards"`
}
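The Scan/Value helpers removed above (presumably relocated to pkg/types along with the structs) follow the standard database/sql JSON-column pattern. A generic version of that pattern, purely illustrative and not part of this codebase, looks like this:

package jsoncolumn

import (
	"database/sql/driver"
	"encoding/json"
	"fmt"
)

// JSONColumn wraps any serializable value as a JSON-encoded DB column.
type JSONColumn[T any] struct{ V T }

// Scan implements sql.Scanner: decode raw bytes from the DB into V.
func (j *JSONColumn[T]) Scan(src any) error {
	data, ok := src.([]byte)
	if !ok {
		return fmt.Errorf("tried to scan from %T instead of bytes", src)
	}
	return json.Unmarshal(data, &j.V)
}

// Value implements driver.Valuer: encode V as JSON bytes for the DB.
func (j JSONColumn[T]) Value() (driver.Value, error) {
	return json.Marshal(j.V)
}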
@@ -4,161 +4,161 @@ import (
	"context"
	"database/sql"
	"fmt"
	"time"

	"github.com/SigNoz/signoz/pkg/query-service/model"
-	"github.com/jmoiron/sqlx"
+	"github.com/SigNoz/signoz/pkg/sqlstore"
+	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/valuer"
)

type serviceConfigRepository interface {
	get(
		ctx context.Context,
-		cloudProvider string,
+		orgID string,
		cloudAccountId string,
-		serviceId string,
-	) (*CloudServiceConfig, *model.ApiError)
+		serviceType string,
+	) (*types.CloudServiceConfig, *model.ApiError)

	upsert(
		ctx context.Context,
+		orgID string,
		cloudProvider string,
		cloudAccountId string,
		serviceId string,
-		config CloudServiceConfig,
-	) (*CloudServiceConfig, *model.ApiError)
+		config types.CloudServiceConfig,
+	) (*types.CloudServiceConfig, *model.ApiError)

	getAllForAccount(
		ctx context.Context,
-		cloudProvider string,
+		orgID string,
		cloudAccountId string,
	) (
-		configsBySvcId map[string]*CloudServiceConfig,
+		configsBySvcId map[string]*types.CloudServiceConfig,
		apiErr *model.ApiError,
	)
}

-func newServiceConfigRepository(db *sqlx.DB) (
+func newServiceConfigRepository(store sqlstore.SQLStore) (
	*serviceConfigSQLRepository, error,
) {
	return &serviceConfigSQLRepository{
-		db: db,
+		store: store,
	}, nil
}

type serviceConfigSQLRepository struct {
-	db *sqlx.DB
+	store sqlstore.SQLStore
}

func (r *serviceConfigSQLRepository) get(
	ctx context.Context,
-	cloudProvider string,
+	orgID string,
	cloudAccountId string,
-	serviceId string,
-) (*CloudServiceConfig, *model.ApiError) {
+	serviceType string,
+) (*types.CloudServiceConfig, *model.ApiError) {

-	var result CloudServiceConfig
+	var result types.CloudIntegrationService

-	err := r.db.GetContext(
-		ctx, &result, `
-			select
-				config_json
-			from cloud_integrations_service_configs
-			where
-				cloud_provider=$1
-				and cloud_account_id=$2
-				and service_id=$3
-		`,
-		cloudProvider, cloudAccountId, serviceId,
-	)
+	err := r.store.BunDB().NewSelect().
+		Model(&result).
+		Join("JOIN cloud_integration ci ON ci.id = cis.cloud_integration_id").
+		Where("ci.org_id = ?", orgID).
+		Where("ci.id = ?", cloudAccountId).
+		Where("cis.type = ?", serviceType).
+		Scan(ctx)

	if err == sql.ErrNoRows {
		return nil, model.NotFoundError(fmt.Errorf(
-			"couldn't find %s %s config for %s",
-			cloudProvider, serviceId, cloudAccountId,
+			"couldn't find config for cloud account %s",
+			cloudAccountId,
		))

	} else if err != nil {
		return nil, model.InternalError(fmt.Errorf(
			"couldn't query cloud service config: %w", err,
		))
	}

-	return &result, nil
+	return &result.Config, nil

}

func (r *serviceConfigSQLRepository) upsert(
	ctx context.Context,
+	orgID string,
	cloudProvider string,
	cloudAccountId string,
	serviceId string,
-	config CloudServiceConfig,
-) (*CloudServiceConfig, *model.ApiError) {
+	config types.CloudServiceConfig,
+) (*types.CloudServiceConfig, *model.ApiError) {

-	query := `
-		INSERT INTO cloud_integrations_service_configs (
-			cloud_provider,
-			cloud_account_id,
-			service_id,
-			config_json
-		) values ($1, $2, $3, $4)
-		on conflict(cloud_provider, cloud_account_id, service_id)
-			do update set config_json=excluded.config_json
-	`
-	_, dbErr := r.db.ExecContext(
-		ctx, query,
-		cloudProvider, cloudAccountId, serviceId, &config,
-	)
-	if dbErr != nil {
+	// get cloud integration id from account id
+	// if the account is not connected, we don't need to upsert the config
+	var cloudIntegrationId string
+	err := r.store.BunDB().NewSelect().
+		Model((*types.CloudIntegration)(nil)).
+		Column("id").
+		Where("provider = ?", cloudProvider).
+		Where("account_id = ?", cloudAccountId).
+		Where("org_id = ?", orgID).
+		Where("removed_at is NULL").
+		Where("last_agent_report is not NULL").
+		Scan(ctx, &cloudIntegrationId)

+	if err != nil {
		return nil, model.InternalError(fmt.Errorf(
-			"could not upsert cloud service config: %w", dbErr,
+			"couldn't query cloud integration id: %w", err,
		))
	}

-	upsertedConfig, apiErr := r.get(ctx, cloudProvider, cloudAccountId, serviceId)
-	if apiErr != nil {
+	serviceConfig := types.CloudIntegrationService{
+		Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
+		TimeAuditable: types.TimeAuditable{
+			CreatedAt: time.Now(),
+			UpdatedAt: time.Now(),
+		},
+		Config:             config,
+		Type:               serviceId,
+		CloudIntegrationID: cloudIntegrationId,
+	}
+	_, err = r.store.BunDB().NewInsert().
+		Model(&serviceConfig).
+		On("conflict(cloud_integration_id, type) do update set config=excluded.config, updated_at=excluded.updated_at").
+		Exec(ctx)
+	if err != nil {
		return nil, model.InternalError(fmt.Errorf(
-			"couldn't fetch upserted service config: %w", apiErr.ToError(),
+			"could not upsert cloud service config: %w", err,
		))
	}

-	return upsertedConfig, nil
+	return &serviceConfig.Config, nil

}

func (r *serviceConfigSQLRepository) getAllForAccount(
	ctx context.Context,
-	cloudProvider string,
+	orgID string,
	cloudAccountId string,
-) (map[string]*CloudServiceConfig, *model.ApiError) {
+) (map[string]*types.CloudServiceConfig, *model.ApiError) {

-	type ScannedServiceConfigRecord struct {
-		ServiceId string             `db:"service_id"`
-		Config    CloudServiceConfig `db:"config_json"`
-	}
+	serviceConfigs := []types.CloudIntegrationService{}

-	records := []ScannedServiceConfigRecord{}

-	err := r.db.SelectContext(
-		ctx, &records, `
-			select
-				service_id,
-				config_json
-			from cloud_integrations_service_configs
-			where
-				cloud_provider=$1
-				and cloud_account_id=$2
-		`,
-		cloudProvider, cloudAccountId,
-	)
+	err := r.store.BunDB().NewSelect().
+		Model(&serviceConfigs).
+		Join("JOIN cloud_integration ci ON ci.id = cis.cloud_integration_id").
+		Where("ci.id = ?", cloudAccountId).
+		Where("ci.org_id = ?", orgID).
+		Scan(ctx)
	if err != nil {
		return nil, model.InternalError(fmt.Errorf(
			"could not query service configs from db: %w", err,
		))
	}

-	result := map[string]*CloudServiceConfig{}
+	result := map[string]*types.CloudServiceConfig{}

-	for _, r := range records {
-		result[r.ServiceId] = &r.Config
+	for _, r := range serviceConfigs {
+		result[r.Type] = &r.Config
	}

	return result, nil
||||
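A hedged aside, not part of the diff: the repository methods above swap hand-written sqlx SQL for bun's query builder, and the "cis" alias in the JOIN condition can only resolve if the service model declares it in its bun tag. The sketch below shows that mechanism; the struct, table names, and tags here are assumptions for illustration, not the repo's actual definitions.

package sketch

import (
	"context"

	"github.com/uptrace/bun"
)

// Assumed model: a bun tag like this would make "cis" usable in JOIN/WHERE
// clauses, as the rewritten get() above relies on.
type CloudIntegrationService struct {
	bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`

	ID     string `bun:"id,pk"`
	Type   string `bun:"type"`
	Config string `bun:"config"`
}

// getServiceConfig mirrors the shape of the rewritten get(): join the parent
// cloud_integration table, scope by org and account, filter by service type.
func getServiceConfig(ctx context.Context, db *bun.DB, orgID, accountID, serviceType string) (string, error) {
	var svc CloudIntegrationService
	err := db.NewSelect().
		Model(&svc).
		Join("JOIN cloud_integration ci ON ci.id = cis.cloud_integration_id").
		Where("ci.org_id = ?", orgID).
		Where("ci.id = ?", accountID).
		Where("cis.type = ?", serviceType).
		Scan(ctx)
	if err != nil {
		return "", err
	}
	return svc.Config, nil
}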
@@ -22,6 +22,7 @@ import (
 	errorsV2 "github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/http/render"
 	"github.com/SigNoz/signoz/pkg/modules/preference"
+	"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
 	"github.com/SigNoz/signoz/pkg/query-service/app/metricsexplorer"
 	"github.com/SigNoz/signoz/pkg/signoz"
 	"github.com/SigNoz/signoz/pkg/valuer"
@@ -37,7 +38,6 @@ import (
 	"github.com/SigNoz/signoz/pkg/query-service/app/dashboards"
 	"github.com/SigNoz/signoz/pkg/query-service/app/explorer"
 	"github.com/SigNoz/signoz/pkg/query-service/app/inframetrics"
-	"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
 	queues2 "github.com/SigNoz/signoz/pkg/query-service/app/integrations/messagingQueues/queues"
 	"github.com/SigNoz/signoz/pkg/query-service/app/integrations/thirdPartyApi"
 	"github.com/SigNoz/signoz/pkg/query-service/app/logs"
@@ -549,6 +549,7 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *AuthMiddleware) {
 	router.HandleFunc("/api/v1/services/list", am.ViewAccess(aH.getServicesList)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/service/top_operations", am.ViewAccess(aH.getTopOperations)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/service/top_level_operations", am.ViewAccess(aH.getServicesTopLevelOps)).Methods(http.MethodPost)
+	router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(aH.SearchTraces)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/usage", am.ViewAccess(aH.getUsage)).Methods(http.MethodGet)
 	router.HandleFunc("/api/v1/dependency_graph", am.ViewAccess(aH.dependencyGraph)).Methods(http.MethodPost)
 	router.HandleFunc("/api/v1/settings/ttl", am.AdminAccess(aH.setTTL)).Methods(http.MethodPost)
@@ -1081,14 +1082,14 @@ func (aH *APIHandler) getDashboards(w http.ResponseWriter, r *http.Request) {
 	}

 	ic := aH.IntegrationsController
-	installedIntegrationDashboards, err := ic.GetDashboardsForInstalledIntegrations(r.Context())
+	installedIntegrationDashboards, err := ic.GetDashboardsForInstalledIntegrations(r.Context(), claims.OrgID)
 	if err != nil {
 		zap.L().Error("failed to get dashboards for installed integrations", zap.Error(err))
 	} else {
 		allDashboards = append(allDashboards, installedIntegrationDashboards...)
 	}

-	cloudIntegrationDashboards, err := aH.CloudIntegrationsController.AvailableDashboards(r.Context())
+	cloudIntegrationDashboards, err := aH.CloudIntegrationsController.AvailableDashboards(r.Context(), claims.OrgID)
 	if err != nil {
 		zap.L().Error("failed to get cloud dashboards", zap.Error(err))
 	} else {
@@ -1266,7 +1267,7 @@ func (aH *APIHandler) getDashboard(w http.ResponseWriter, r *http.Request) {

 	if aH.CloudIntegrationsController.IsCloudIntegrationDashboardUuid(uuid) {
 		dashboard, apiError = aH.CloudIntegrationsController.GetDashboardById(
-			r.Context(), uuid,
+			r.Context(), claims.OrgID, uuid,
 		)
 		if apiError != nil {
 			RespondError(w, apiError, nil)
@@ -1275,7 +1276,7 @@ func (aH *APIHandler) getDashboard(w http.ResponseWriter, r *http.Request) {

 	} else {
 		dashboard, apiError = aH.IntegrationsController.GetInstalledIntegrationDashboardById(
-			r.Context(), uuid,
+			r.Context(), claims.OrgID, uuid,
 		)
 		if apiError != nil {
 			RespondError(w, apiError, nil)
@@ -1724,6 +1725,22 @@ func (aH *APIHandler) getServicesList(w http.ResponseWriter, r *http.Request) {

 }

+func (aH *APIHandler) SearchTraces(w http.ResponseWriter, r *http.Request) {
+	params, err := ParseSearchTracesParams(r)
+	if err != nil {
+		RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading params")
+		return
+	}
+
+	result, err := aH.reader.SearchTraces(r.Context(), params)
+	if aH.HandleError(w, err, http.StatusBadRequest) {
+		return
+	}
+
+	aH.WriteJSON(w, r, result)
+
+}
+
 func (aH *APIHandler) GetWaterfallSpansForTraceWithMetadata(w http.ResponseWriter, r *http.Request) {
 	traceID := mux.Vars(r)["traceId"]
 	if traceID == "" {
@@ -2190,6 +2207,11 @@ func (aH *APIHandler) editUser(w http.ResponseWriter, r *http.Request) {
 		old.ProfilePictureURL = update.ProfilePictureURL
 	}

+	if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(old.Email)) {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user cannot be updated"))
+		return
+	}
+
 	_, apiErr = dao.DB().EditUser(ctx, &types.User{
 		ID:   old.ID,
 		Name: old.Name,
@@ -2221,6 +2243,11 @@ func (aH *APIHandler) deleteUser(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(user.Email)) {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "integration user cannot be updated"))
+		return
+	}
+
 	if user == nil {
 		RespondError(w, &model.ApiError{
 			Typ: model.ErrorNotFound,
@@ -3480,9 +3507,14 @@ func (aH *APIHandler) ListIntegrations(
 	for k, values := range r.URL.Query() {
 		params[k] = values[0]
 	}
+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}

 	resp, apiErr := aH.IntegrationsController.ListIntegrations(
-		r.Context(), params,
+		r.Context(), claims.OrgID, params,
 	)
 	if apiErr != nil {
 		RespondError(w, apiErr, "Failed to fetch integrations")
@@ -3495,8 +3527,13 @@ func (aH *APIHandler) GetIntegration(
 	w http.ResponseWriter, r *http.Request,
 ) {
 	integrationId := mux.Vars(r)["integrationId"]
+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
 	integration, apiErr := aH.IntegrationsController.GetIntegration(
-		r.Context(), integrationId,
+		r.Context(), claims.OrgID, integrationId,
 	)
 	if apiErr != nil {
 		RespondError(w, apiErr, "Failed to fetch integration details")
@@ -3510,8 +3547,13 @@ func (aH *APIHandler) GetIntegrationConnectionStatus(
 	w http.ResponseWriter, r *http.Request,
 ) {
 	integrationId := mux.Vars(r)["integrationId"]
+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
 	isInstalled, apiErr := aH.IntegrationsController.IsIntegrationInstalled(
-		r.Context(), integrationId,
+		r.Context(), claims.OrgID, integrationId,
 	)
 	if apiErr != nil {
 		RespondError(w, apiErr, "failed to check if integration is installed")
@@ -3525,7 +3567,7 @@ func (aH *APIHandler) GetIntegrationConnectionStatus(
 	}

 	connectionTests, apiErr := aH.IntegrationsController.GetIntegrationConnectionTests(
-		r.Context(), integrationId,
+		r.Context(), claims.OrgID, integrationId,
 	)
 	if apiErr != nil {
 		RespondError(w, apiErr, "failed to fetch integration connection tests")
@@ -3724,8 +3766,14 @@ func (aH *APIHandler) InstallIntegration(
 		return
 	}

+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
+
 	integration, apiErr := aH.IntegrationsController.Install(
-		r.Context(), &req,
+		r.Context(), claims.OrgID, &req,
 	)
 	if apiErr != nil {
 		RespondError(w, apiErr, nil)
@@ -3746,7 +3794,13 @@ func (aH *APIHandler) UninstallIntegration(
 		return
 	}

-	apiErr := aH.IntegrationsController.Uninstall(r.Context(), &req)
+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
+
+	apiErr := aH.IntegrationsController.Uninstall(r.Context(), claims.OrgID, &req)
 	if apiErr != nil {
 		RespondError(w, apiErr, nil)
 		return
@@ -3802,8 +3856,14 @@ func (aH *APIHandler) CloudIntegrationsListConnectedAccounts(
 ) {
 	cloudProvider := mux.Vars(r)["cloudProvider"]

+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
+
 	resp, apiErr := aH.CloudIntegrationsController.ListConnectedAccounts(
-		r.Context(), cloudProvider,
+		r.Context(), claims.OrgID, cloudProvider,
 	)

 	if apiErr != nil {
@@ -3824,8 +3884,14 @@ func (aH *APIHandler) CloudIntegrationsGenerateConnectionUrl(
 		return
 	}

+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
+
 	result, apiErr := aH.CloudIntegrationsController.GenerateConnectionUrl(
-		r.Context(), cloudProvider, req,
+		r.Context(), claims.OrgID, cloudProvider, req,
 	)

 	if apiErr != nil {
@@ -3842,8 +3908,14 @@ func (aH *APIHandler) CloudIntegrationsGetAccountStatus(
 	cloudProvider := mux.Vars(r)["cloudProvider"]
 	accountId := mux.Vars(r)["accountId"]

+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
+
 	resp, apiErr := aH.CloudIntegrationsController.GetAccountStatus(
-		r.Context(), cloudProvider, accountId,
+		r.Context(), claims.OrgID, cloudProvider, accountId,
 	)

 	if apiErr != nil {
@@ -3864,8 +3936,14 @@ func (aH *APIHandler) CloudIntegrationsAgentCheckIn(
 		return
 	}

+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
+
 	result, apiErr := aH.CloudIntegrationsController.CheckInAsAgent(
-		r.Context(), cloudProvider, req,
+		r.Context(), claims.OrgID, cloudProvider, req,
 	)

 	if apiErr != nil {
@@ -3888,8 +3966,14 @@ func (aH *APIHandler) CloudIntegrationsUpdateAccountConfig(
 		return
 	}

+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
+
 	result, apiErr := aH.CloudIntegrationsController.UpdateAccountConfig(
-		r.Context(), cloudProvider, accountId, req,
+		r.Context(), claims.OrgID, cloudProvider, accountId, req,
 	)

 	if apiErr != nil {
@@ -3906,8 +3990,14 @@ func (aH *APIHandler) CloudIntegrationsDisconnectAccount(
 	cloudProvider := mux.Vars(r)["cloudProvider"]
 	accountId := mux.Vars(r)["accountId"]

+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
+
 	result, apiErr := aH.CloudIntegrationsController.DisconnectAccount(
-		r.Context(), cloudProvider, accountId,
+		r.Context(), claims.OrgID, cloudProvider, accountId,
 	)

 	if apiErr != nil {
@@ -3930,8 +4020,14 @@ func (aH *APIHandler) CloudIntegrationsListServices(
 		cloudAccountId = &cloudAccountIdQP
 	}

+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
+
 	resp, apiErr := aH.CloudIntegrationsController.ListServices(
-		r.Context(), cloudProvider, cloudAccountId,
+		r.Context(), claims.OrgID, cloudProvider, cloudAccountId,
 	)

 	if apiErr != nil {
@@ -3954,8 +4050,14 @@ func (aH *APIHandler) CloudIntegrationsGetServiceDetails(
 		cloudAccountId = &cloudAccountIdQP
 	}

+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
+
 	resp, apiErr := aH.CloudIntegrationsController.GetServiceDetails(
-		r.Context(), cloudProvider, serviceId, cloudAccountId,
+		r.Context(), claims.OrgID, cloudProvider, serviceId, cloudAccountId,
 	)
 	if apiErr != nil {
 		RespondError(w, apiErr, nil)
@@ -4194,8 +4296,14 @@ func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(
 		return
 	}

+	claims, ok := authtypes.ClaimsFromContext(r.Context())
+	if !ok {
+		render.Error(w, errorsV2.Newf(errorsV2.TypeUnauthenticated, errorsV2.CodeUnauthenticated, "unauthenticated"))
+		return
+	}
+
 	result, apiErr := aH.CloudIntegrationsController.UpdateServiceConfig(
-		r.Context(), cloudProvider, serviceId, req,
+		r.Context(), claims.OrgID, cloudProvider, serviceId, req,
 	)

 	if apiErr != nil {
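A hedged aside, not part of the diff: every handler hunk above repeats the same claims guard before threading claims.OrgID into the controller call. A sketch of that pattern as a standalone helper follows; the helper itself is hypothetical, only authtypes.ClaimsFromContext and claims.OrgID appear in the diff.

package sketch

import (
	"net/http"

	"github.com/SigNoz/signoz/pkg/types/authtypes"
)

// orgIDFromRequest extracts the caller's org id from the JWT claims stored on
// the request context; ok is false when the request carries no valid claims.
func orgIDFromRequest(r *http.Request) (string, bool) {
	claims, ok := authtypes.ClaimsFromContext(r.Context())
	if !ok {
		return "", false
	}
	return claims.OrgID, true
}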
@@ -18,7 +18,7 @@ type Controller struct {
 func NewController(sqlStore sqlstore.SQLStore) (
 	*Controller, error,
 ) {
-	mgr, err := NewManager(sqlStore.SQLxDB())
+	mgr, err := NewManager(sqlStore)
 	if err != nil {
 		return nil, fmt.Errorf("couldn't create integrations manager: %w", err)
 	}
@@ -35,7 +35,7 @@ type IntegrationsListResponse struct {
 }

 func (c *Controller) ListIntegrations(
-	ctx context.Context, params map[string]string,
+	ctx context.Context, orgId string, params map[string]string,
 ) (
 	*IntegrationsListResponse, *model.ApiError,
 ) {
@@ -47,7 +47,7 @@ func (c *Controller) ListIntegrations(
 		}
 	}

-	integrations, apiErr := c.mgr.ListIntegrations(ctx, filters)
+	integrations, apiErr := c.mgr.ListIntegrations(ctx, orgId, filters)
 	if apiErr != nil {
 		return nil, apiErr
 	}
@@ -58,16 +58,15 @@ func (c *Controller) ListIntegrations(
 }

 func (c *Controller) GetIntegration(
-	ctx context.Context, integrationId string,
+	ctx context.Context, orgId string, integrationId string,
 ) (*Integration, *model.ApiError) {
-	return c.mgr.GetIntegration(ctx, integrationId)
+	return c.mgr.GetIntegration(ctx, orgId, integrationId)
 }

 func (c *Controller) IsIntegrationInstalled(
-	ctx context.Context,
-	integrationId string,
+	ctx context.Context, orgId string, integrationId string,
 ) (bool, *model.ApiError) {
-	installation, apiErr := c.mgr.getInstalledIntegration(ctx, integrationId)
+	installation, apiErr := c.mgr.getInstalledIntegration(ctx, orgId, integrationId)
 	if apiErr != nil {
 		return false, apiErr
 	}
@@ -76,9 +75,9 @@ func (c *Controller) IsIntegrationInstalled(
 }

 func (c *Controller) GetIntegrationConnectionTests(
-	ctx context.Context, integrationId string,
+	ctx context.Context, orgId string, integrationId string,
 ) (*IntegrationConnectionTests, *model.ApiError) {
-	return c.mgr.GetIntegrationConnectionTests(ctx, integrationId)
+	return c.mgr.GetIntegrationConnectionTests(ctx, orgId, integrationId)
 }

 type InstallIntegrationRequest struct {
@@ -87,10 +86,10 @@ type InstallIntegrationRequest struct {
 }

 func (c *Controller) Install(
-	ctx context.Context, req *InstallIntegrationRequest,
+	ctx context.Context, orgId string, req *InstallIntegrationRequest,
 ) (*IntegrationsListItem, *model.ApiError) {
 	res, apiErr := c.mgr.InstallIntegration(
-		ctx, req.IntegrationId, req.Config,
+		ctx, orgId, req.IntegrationId, req.Config,
 	)
 	if apiErr != nil {
 		return nil, apiErr
@@ -104,7 +103,7 @@ type UninstallIntegrationRequest struct {
 }

 func (c *Controller) Uninstall(
-	ctx context.Context, req *UninstallIntegrationRequest,
+	ctx context.Context, orgId string, req *UninstallIntegrationRequest,
 ) *model.ApiError {
 	if len(req.IntegrationId) < 1 {
 		return model.BadRequest(fmt.Errorf(
@@ -113,7 +112,7 @@ func (c *Controller) Uninstall(
 	}

 	apiErr := c.mgr.UninstallIntegration(
-		ctx, req.IntegrationId,
+		ctx, orgId, req.IntegrationId,
 	)
 	if apiErr != nil {
 		return apiErr
@@ -123,19 +122,19 @@ func (c *Controller) Uninstall(
 }

 func (c *Controller) GetPipelinesForInstalledIntegrations(
-	ctx context.Context,
+	ctx context.Context, orgId string,
 ) ([]pipelinetypes.GettablePipeline, *model.ApiError) {
-	return c.mgr.GetPipelinesForInstalledIntegrations(ctx)
+	return c.mgr.GetPipelinesForInstalledIntegrations(ctx, orgId)
 }

 func (c *Controller) GetDashboardsForInstalledIntegrations(
-	ctx context.Context,
+	ctx context.Context, orgId string,
 ) ([]types.Dashboard, *model.ApiError) {
-	return c.mgr.GetDashboardsForInstalledIntegrations(ctx)
+	return c.mgr.GetDashboardsForInstalledIntegrations(ctx, orgId)
 }

 func (c *Controller) GetInstalledIntegrationDashboardById(
-	ctx context.Context, dashboardUuid string,
+	ctx context.Context, orgId string, dashboardUuid string,
 ) (*types.Dashboard, *model.ApiError) {
-	return c.mgr.GetInstalledIntegrationDashboardById(ctx, dashboardUuid)
+	return c.mgr.GetInstalledIntegrationDashboardById(ctx, orgId, dashboardUuid)
 }
@@ -5,15 +5,14 @@ import (
 	"fmt"
 	"slices"
 	"strings"
-	"time"

 	"github.com/SigNoz/signoz/pkg/query-service/model"
 	"github.com/SigNoz/signoz/pkg/query-service/rules"
 	"github.com/SigNoz/signoz/pkg/query-service/utils"
+	"github.com/SigNoz/signoz/pkg/sqlstore"
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
 	"github.com/SigNoz/signoz/pkg/valuer"
-	"github.com/jmoiron/sqlx"
 )

 type IntegrationAuthor struct {
@@ -105,16 +104,9 @@ type IntegrationsListItem struct {
 	IsInstalled bool `json:"is_installed"`
 }

-type InstalledIntegration struct {
-	IntegrationId string                     `json:"integration_id" db:"integration_id"`
-	Config        InstalledIntegrationConfig `json:"config_json" db:"config_json"`
-	InstalledAt   time.Time                  `json:"installed_at" db:"installed_at"`
-}
-type InstalledIntegrationConfig map[string]interface{}
-
 type Integration struct {
 	IntegrationDetails
-	Installation *InstalledIntegration `json:"installation"`
+	Installation *types.InstalledIntegration `json:"installation"`
 }

 type Manager struct {
@@ -122,8 +114,8 @@ type Manager struct {
 	installedIntegrationsRepo InstalledIntegrationsRepo
 }

-func NewManager(db *sqlx.DB) (*Manager, error) {
-	iiRepo, err := NewInstalledIntegrationsSqliteRepo(db)
+func NewManager(store sqlstore.SQLStore) (*Manager, error) {
+	iiRepo, err := NewInstalledIntegrationsSqliteRepo(store)
 	if err != nil {
 		return nil, fmt.Errorf(
 			"could not init sqlite DB for installed integrations: %w", err,
@@ -142,6 +134,7 @@ type IntegrationsFilter struct {

 func (m *Manager) ListIntegrations(
 	ctx context.Context,
+	orgId string,
 	filter *IntegrationsFilter,
 	// Expected to have pagination over time.
 ) ([]IntegrationsListItem, *model.ApiError) {
@@ -152,22 +145,22 @@ func (m *Manager) ListIntegrations(
 		)
 	}

-	installed, apiErr := m.installedIntegrationsRepo.list(ctx)
+	installed, apiErr := m.installedIntegrationsRepo.list(ctx, orgId)
 	if apiErr != nil {
 		return nil, model.WrapApiError(
 			apiErr, "could not fetch installed integrations",
 		)
 	}
-	installedIds := []string{}
+	installedTypes := []string{}
 	for _, ii := range installed {
-		installedIds = append(installedIds, ii.IntegrationId)
+		installedTypes = append(installedTypes, ii.Type)
 	}

 	result := []IntegrationsListItem{}
 	for _, ai := range available {
 		result = append(result, IntegrationsListItem{
 			IntegrationSummary: ai.IntegrationSummary,
-			IsInstalled:        slices.Contains(installedIds, ai.Id),
+			IsInstalled:        slices.Contains(installedTypes, ai.Id),
 		})
 	}

@@ -188,6 +181,7 @@ func (m *Manager) ListIntegrations(

 func (m *Manager) GetIntegration(
 	ctx context.Context,
+	orgId string,
 	integrationId string,
 ) (*Integration, *model.ApiError) {
 	integrationDetails, apiErr := m.getIntegrationDetails(
@@ -198,7 +192,7 @@ func (m *Manager) GetIntegration(
 	}

 	installation, apiErr := m.getInstalledIntegration(
-		ctx, integrationId,
+		ctx, orgId, integrationId,
 	)
 	if apiErr != nil {
 		return nil, apiErr
@@ -212,6 +206,7 @@ func (m *Manager) GetIntegration(

 func (m *Manager) GetIntegrationConnectionTests(
 	ctx context.Context,
+	orgId string,
 	integrationId string,
 ) (*IntegrationConnectionTests, *model.ApiError) {
 	integrationDetails, apiErr := m.getIntegrationDetails(
@@ -225,8 +220,9 @@ func (m *Manager) GetIntegrationConnectionTests(

 func (m *Manager) InstallIntegration(
 	ctx context.Context,
+	orgId string,
 	integrationId string,
-	config InstalledIntegrationConfig,
+	config types.InstalledIntegrationConfig,
 ) (*IntegrationsListItem, *model.ApiError) {
 	integrationDetails, apiErr := m.getIntegrationDetails(ctx, integrationId)
 	if apiErr != nil {
@@ -234,7 +230,7 @@ func (m *Manager) InstallIntegration(
 	}

 	_, apiErr = m.installedIntegrationsRepo.upsert(
-		ctx, integrationId, config,
+		ctx, orgId, integrationId, config,
 	)
 	if apiErr != nil {
 		return nil, model.WrapApiError(
@@ -250,15 +246,17 @@ func (m *Manager) InstallIntegration(

 func (m *Manager) UninstallIntegration(
 	ctx context.Context,
+	orgId string,
 	integrationId string,
 ) *model.ApiError {
-	return m.installedIntegrationsRepo.delete(ctx, integrationId)
+	return m.installedIntegrationsRepo.delete(ctx, orgId, integrationId)
 }

 func (m *Manager) GetPipelinesForInstalledIntegrations(
 	ctx context.Context,
+	orgId string,
 ) ([]pipelinetypes.GettablePipeline, *model.ApiError) {
-	installedIntegrations, apiErr := m.getInstalledIntegrations(ctx)
+	installedIntegrations, apiErr := m.getInstalledIntegrations(ctx, orgId)
 	if apiErr != nil {
 		return nil, apiErr
 	}
@@ -308,6 +306,7 @@ func (m *Manager) parseDashboardUuid(dashboardUuid string) (

 func (m *Manager) GetInstalledIntegrationDashboardById(
 	ctx context.Context,
+	orgId string,
 	dashboardUuid string,
 ) (*types.Dashboard, *model.ApiError) {
 	integrationId, dashboardId, apiErr := m.parseDashboardUuid(dashboardUuid)
@@ -315,7 +314,7 @@ func (m *Manager) GetInstalledIntegrationDashboardById(
 		return nil, apiErr
 	}

-	integration, apiErr := m.GetIntegration(ctx, integrationId)
+	integration, apiErr := m.GetIntegration(ctx, orgId, integrationId)
 	if apiErr != nil {
 		return nil, apiErr
 	}
@@ -355,8 +354,9 @@ func (m *Manager) GetInstalledIntegrationDashboardById(

 func (m *Manager) GetDashboardsForInstalledIntegrations(
 	ctx context.Context,
+	orgId string,
 ) ([]types.Dashboard, *model.ApiError) {
-	installedIntegrations, apiErr := m.getInstalledIntegrations(ctx)
+	installedIntegrations, apiErr := m.getInstalledIntegrations(ctx, orgId)
 	if apiErr != nil {
 		return nil, apiErr
 	}
@@ -421,10 +421,11 @@ func (m *Manager) getIntegrationDetails(

 func (m *Manager) getInstalledIntegration(
 	ctx context.Context,
+	orgId string,
 	integrationId string,
-) (*InstalledIntegration, *model.ApiError) {
+) (*types.InstalledIntegration, *model.ApiError) {
 	iis, apiErr := m.installedIntegrationsRepo.get(
-		ctx, []string{integrationId},
+		ctx, orgId, []string{integrationId},
 	)
 	if apiErr != nil {
 		return nil, model.WrapApiError(apiErr, fmt.Sprintf(
@@ -441,32 +442,33 @@ func (m *Manager) getInstalledIntegration(

 func (m *Manager) getInstalledIntegrations(
 	ctx context.Context,
+	orgId string,
 ) (
 	map[string]Integration, *model.ApiError,
 ) {
-	installations, apiErr := m.installedIntegrationsRepo.list(ctx)
+	installations, apiErr := m.installedIntegrationsRepo.list(ctx, orgId)
 	if apiErr != nil {
 		return nil, apiErr
 	}

-	installedIds := utils.MapSlice(installations, func(i InstalledIntegration) string {
-		return i.IntegrationId
+	installedTypes := utils.MapSlice(installations, func(i types.InstalledIntegration) string {
+		return i.Type
 	})
-	integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedIds)
+	integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedTypes)
 	if apiErr != nil {
 		return nil, apiErr
 	}

 	result := map[string]Integration{}
 	for _, ii := range installations {
-		iDetails, exists := integrationDetails[ii.IntegrationId]
+		iDetails, exists := integrationDetails[ii.Type]
 		if !exists {
 			return nil, model.InternalError(fmt.Errorf(
-				"couldn't find integration details for %s", ii.IntegrationId,
+				"couldn't find integration details for %s", ii.Type,
 			))
 		}

-		result[ii.IntegrationId] = Integration{
+		result[ii.Type] = Integration{
 			Installation:       &ii,
 			IntegrationDetails: iDetails,
 		}
@@ -14,18 +14,23 @@ func TestIntegrationLifecycle(t *testing.T) {
 	mgr := NewTestIntegrationsManager(t)
 	ctx := context.Background()

+	user, apiErr := createTestUser()
+	if apiErr != nil {
+		t.Fatalf("could not create test user: %v", apiErr)
+	}
+
 	ii := true
 	installedIntegrationsFilter := &IntegrationsFilter{
 		IsInstalled: &ii,
 	}

 	installedIntegrations, apiErr := mgr.ListIntegrations(
-		ctx, installedIntegrationsFilter,
+		ctx, user.OrgID, installedIntegrationsFilter,
 	)
 	require.Nil(apiErr)
 	require.Equal([]IntegrationsListItem{}, installedIntegrations)

-	availableIntegrations, apiErr := mgr.ListIntegrations(ctx, nil)
+	availableIntegrations, apiErr := mgr.ListIntegrations(ctx, user.OrgID, nil)
 	require.Nil(apiErr)
 	require.Equal(2, len(availableIntegrations))
 	require.False(availableIntegrations[0].IsInstalled)
@@ -33,44 +38,44 @@ func TestIntegrationLifecycle(t *testing.T) {

 	testIntegrationConfig := map[string]interface{}{}
 	installed, apiErr := mgr.InstallIntegration(
-		ctx, availableIntegrations[1].Id, testIntegrationConfig,
+		ctx, user.OrgID, availableIntegrations[1].Id, testIntegrationConfig,
 	)
 	require.Nil(apiErr)
 	require.Equal(installed.Id, availableIntegrations[1].Id)

-	integration, apiErr := mgr.GetIntegration(ctx, availableIntegrations[1].Id)
+	integration, apiErr := mgr.GetIntegration(ctx, user.OrgID, availableIntegrations[1].Id)
 	require.Nil(apiErr)
 	require.Equal(integration.Id, availableIntegrations[1].Id)
 	require.NotNil(integration.Installation)

 	installedIntegrations, apiErr = mgr.ListIntegrations(
-		ctx, installedIntegrationsFilter,
+		ctx, user.OrgID, installedIntegrationsFilter,
 	)
 	require.Nil(apiErr)
 	require.Equal(1, len(installedIntegrations))
 	require.Equal(availableIntegrations[1].Id, installedIntegrations[0].Id)

-	availableIntegrations, apiErr = mgr.ListIntegrations(ctx, nil)
+	availableIntegrations, apiErr = mgr.ListIntegrations(ctx, user.OrgID, nil)
 	require.Nil(apiErr)
 	require.Equal(2, len(availableIntegrations))
 	require.False(availableIntegrations[0].IsInstalled)
 	require.True(availableIntegrations[1].IsInstalled)

-	apiErr = mgr.UninstallIntegration(ctx, installed.Id)
+	apiErr = mgr.UninstallIntegration(ctx, user.OrgID, installed.Id)
 	require.Nil(apiErr)

-	integration, apiErr = mgr.GetIntegration(ctx, availableIntegrations[1].Id)
+	integration, apiErr = mgr.GetIntegration(ctx, user.OrgID, availableIntegrations[1].Id)
 	require.Nil(apiErr)
 	require.Equal(integration.Id, availableIntegrations[1].Id)
 	require.Nil(integration.Installation)

 	installedIntegrations, apiErr = mgr.ListIntegrations(
-		ctx, installedIntegrationsFilter,
+		ctx, user.OrgID, installedIntegrationsFilter,
 	)
 	require.Nil(apiErr)
 	require.Equal(0, len(installedIntegrations))

-	availableIntegrations, apiErr = mgr.ListIntegrations(ctx, nil)
+	availableIntegrations, apiErr = mgr.ListIntegrations(ctx, user.OrgID, nil)
 	require.Nil(apiErr)
 	require.Equal(2, len(availableIntegrations))
 	require.False(availableIntegrations[0].IsInstalled)
@@ -2,51 +2,33 @@ package integrations

 import (
 	"context"
-	"database/sql/driver"
-	"encoding/json"

 	"github.com/SigNoz/signoz/pkg/query-service/model"
-	"github.com/pkg/errors"
+	"github.com/SigNoz/signoz/pkg/types"
 )

-// For serializing from db
-func (c *InstalledIntegrationConfig) Scan(src interface{}) error {
-	if data, ok := src.([]byte); ok {
-		return json.Unmarshal(data, &c)
-	}
-	return nil
-}
-
-// For serializing to db
-func (c *InstalledIntegrationConfig) Value() (driver.Value, error) {
-	filterSetJson, err := json.Marshal(c)
-	if err != nil {
-		return nil, errors.Wrap(err, "could not serialize integration config to JSON")
-	}
-	return filterSetJson, nil
-}
-
 type InstalledIntegrationsRepo interface {
-	list(context.Context) ([]InstalledIntegration, *model.ApiError)
+	list(ctx context.Context, orgId string) ([]types.InstalledIntegration, *model.ApiError)

 	get(
-		ctx context.Context, integrationIds []string,
-	) (map[string]InstalledIntegration, *model.ApiError)
+		ctx context.Context, orgId string, integrationTypes []string,
+	) (map[string]types.InstalledIntegration, *model.ApiError)

 	upsert(
 		ctx context.Context,
-		integrationId string,
-		config InstalledIntegrationConfig,
-	) (*InstalledIntegration, *model.ApiError)
+		orgId string,
+		integrationType string,
+		config types.InstalledIntegrationConfig,
+	) (*types.InstalledIntegration, *model.ApiError)

-	delete(ctx context.Context, integrationId string) *model.ApiError
+	delete(ctx context.Context, orgId string, integrationType string) *model.ApiError
 }

 type AvailableIntegrationsRepo interface {
 	list(context.Context) ([]IntegrationDetails, *model.ApiError)

 	get(
-		ctx context.Context, integrationIds []string,
+		ctx context.Context, integrationTypes []string,
 	) (map[string]IntegrationDetails, *model.ApiError)

 	// AvailableIntegrationsRepo implementations are expected to cache
@@ -3,39 +3,37 @@ package integrations
 import (
 	"context"
 	"fmt"
-	"strings"

 	"github.com/SigNoz/signoz/pkg/query-service/model"
-	"github.com/jmoiron/sqlx"
+	"github.com/SigNoz/signoz/pkg/sqlstore"
+	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/valuer"
+	"github.com/uptrace/bun"
 )

 type InstalledIntegrationsSqliteRepo struct {
-	db *sqlx.DB
+	store sqlstore.SQLStore
 }

-func NewInstalledIntegrationsSqliteRepo(db *sqlx.DB) (
+func NewInstalledIntegrationsSqliteRepo(store sqlstore.SQLStore) (
 	*InstalledIntegrationsSqliteRepo, error,
 ) {
 	return &InstalledIntegrationsSqliteRepo{
-		db: db,
+		store: store,
 	}, nil
 }

 func (r *InstalledIntegrationsSqliteRepo) list(
 	ctx context.Context,
-) ([]InstalledIntegration, *model.ApiError) {
-	integrations := []InstalledIntegration{}
+	orgId string,
+) ([]types.InstalledIntegration, *model.ApiError) {
+	integrations := []types.InstalledIntegration{}

-	err := r.db.SelectContext(
-		ctx, &integrations, `
-			select
-				integration_id,
-				config_json,
-				installed_at
-			from integrations_installed
-			order by installed_at
-		`,
-	)
+	err := r.store.BunDB().NewSelect().
+		Model(&integrations).
+		Where("org_id = ?", orgId).
+		Order("installed_at").
+		Scan(ctx)
 	if err != nil {
 		return nil, model.InternalError(fmt.Errorf(
 			"could not query installed integrations: %w", err,
@@ -45,38 +43,28 @@ func (r *InstalledIntegrationsSqliteRepo) list(
 }

 func (r *InstalledIntegrationsSqliteRepo) get(
-	ctx context.Context, integrationIds []string,
-) (map[string]InstalledIntegration, *model.ApiError) {
-	integrations := []InstalledIntegration{}
+	ctx context.Context, orgId string, integrationTypes []string,
+) (map[string]types.InstalledIntegration, *model.ApiError) {
+	integrations := []types.InstalledIntegration{}

-	idPlaceholders := []string{}
-	idValues := []interface{}{}
-	for _, id := range integrationIds {
-		idPlaceholders = append(idPlaceholders, "?")
-		idValues = append(idValues, id)
+	typeValues := []interface{}{}
+	for _, integrationType := range integrationTypes {
+		typeValues = append(typeValues, integrationType)
 	}

-	err := r.db.SelectContext(
-		ctx, &integrations, fmt.Sprintf(`
-			select
-				integration_id,
-				config_json,
-				installed_at
-			from integrations_installed
-			where integration_id in (%s)`,
-			strings.Join(idPlaceholders, ", "),
-		),
-		idValues...,
-	)
+	err := r.store.BunDB().NewSelect().Model(&integrations).
+		Where("org_id = ?", orgId).
+		Where("type IN (?)", bun.In(typeValues)).
+		Scan(ctx)
 	if err != nil {
 		return nil, model.InternalError(fmt.Errorf(
 			"could not query installed integrations: %w", err,
 		))
 	}

-	result := map[string]InstalledIntegration{}
+	result := map[string]types.InstalledIntegration{}
 	for _, ii := range integrations {
-		result[ii.IntegrationId] = ii
+		result[ii.Type] = ii
 	}

 	return result, nil
@@ -84,55 +72,57 @@ func (r *InstalledIntegrationsSqliteRepo) get(

 func (r *InstalledIntegrationsSqliteRepo) upsert(
 	ctx context.Context,
-	integrationId string,
-	config InstalledIntegrationConfig,
-) (*InstalledIntegration, *model.ApiError) {
-	serializedConfig, err := config.Value()
-	if err != nil {
-		return nil, model.BadRequest(fmt.Errorf(
-			"could not serialize integration config: %w", err,
-		))
+	orgId string,
+	integrationType string,
+	config types.InstalledIntegrationConfig,
+) (*types.InstalledIntegration, *model.ApiError) {
+
+	integration := types.InstalledIntegration{
+		Identifiable: types.Identifiable{
+			ID: valuer.GenerateUUID(),
+		},
+		OrgID:  orgId,
+		Type:   integrationType,
+		Config: config,
 	}

-	_, dbErr := r.db.ExecContext(
-		ctx, `
-			INSERT INTO integrations_installed (
-				integration_id,
-				config_json
-			) values ($1, $2)
-			on conflict(integration_id) do update
-				set config_json=excluded.config_json
-		`, integrationId, serializedConfig,
-	)
+	_, dbErr := r.store.BunDB().NewInsert().
+		Model(&integration).
+		On("conflict (type, org_id) DO UPDATE").
+		Set("config = EXCLUDED.config").
+		Exec(ctx)

 	if dbErr != nil {
 		return nil, model.InternalError(fmt.Errorf(
 			"could not insert record for integration installation: %w", dbErr,
 		))
 	}

-	res, apiErr := r.get(ctx, []string{integrationId})
+	res, apiErr := r.get(ctx, orgId, []string{integrationType})
 	if apiErr != nil || len(res) < 1 {
 		return nil, model.WrapApiError(
 			apiErr, "could not fetch installed integration",
 		)
 	}

-	installed := res[integrationId]
+	installed := res[integrationType]

 	return &installed, nil
 }

 func (r *InstalledIntegrationsSqliteRepo) delete(
-	ctx context.Context, integrationId string,
+	ctx context.Context, orgId string, integrationType string,
 ) *model.ApiError {
-	_, dbErr := r.db.ExecContext(ctx, `
-		DELETE FROM integrations_installed where integration_id = ?
-	`, integrationId)
+	_, dbErr := r.store.BunDB().NewDelete().
+		Model(&types.InstalledIntegration{}).
+		Where("type = ?", integrationType).
+		Where("org_id = ?", orgId).
+		Exec(ctx)

 	if dbErr != nil {
 		return model.InternalError(fmt.Errorf(
 			"could not delete installed integration record for %s: %w",
-			integrationId, dbErr,
+			integrationType, dbErr,
 		))
 	}
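A hedged aside, not part of the diff: the rewritten get() above relies on bun.In to expand a Go slice into a SQL IN (...) placeholder list. A self-contained sketch of that mechanism follows; the table and column names are assumptions for illustration.

package sketch

import (
	"context"

	"github.com/uptrace/bun"
)

// installedTypesIn returns the subset of candidate types installed for an
// org, using bun.In to expand the slice into query placeholders.
func installedTypesIn(ctx context.Context, db *bun.DB, orgID string, candidates []string) ([]string, error) {
	var out []string
	err := db.NewSelect().
		Table("installed_integration"). // assumed table name
		Column("type").
		Where("org_id = ?", orgID).
		Where("type IN (?)", bun.In(candidates)).
		Scan(ctx, &out)
	return out, err
}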
@@ -5,18 +5,22 @@ import (
 	"slices"
 	"testing"

+	"github.com/SigNoz/signoz/pkg/query-service/auth"
+	"github.com/SigNoz/signoz/pkg/query-service/constants"
+	"github.com/SigNoz/signoz/pkg/query-service/dao"
 	"github.com/SigNoz/signoz/pkg/query-service/model"
 	v3 "github.com/SigNoz/signoz/pkg/query-service/model/v3"
 	"github.com/SigNoz/signoz/pkg/query-service/rules"
 	"github.com/SigNoz/signoz/pkg/query-service/utils"
 	"github.com/SigNoz/signoz/pkg/types"
 	"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
+	"github.com/google/uuid"
 )

 func NewTestIntegrationsManager(t *testing.T) *Manager {
 	testDB := utils.NewQueryServiceDBForTests(t)

-	installedIntegrationsRepo, err := NewInstalledIntegrationsSqliteRepo(testDB.SQLxDB())
+	installedIntegrationsRepo, err := NewInstalledIntegrationsSqliteRepo(testDB)
 	if err != nil {
 		t.Fatalf("could not init sqlite DB for installed integrations: %v", err)
 	}
@@ -27,6 +31,38 @@ func NewTestIntegrationsManager(t *testing.T) *Manager {
 	}
 }

+func createTestUser() (*types.User, *model.ApiError) {
+	// Create a test user for auth
+	ctx := context.Background()
+	org, apiErr := dao.DB().CreateOrg(ctx, &types.Organization{
+		Name: "test",
+	})
+	if apiErr != nil {
+		return nil, apiErr
+	}
+
+	group, apiErr := dao.DB().GetGroupByName(ctx, constants.AdminGroup)
+	if apiErr != nil {
+		return nil, apiErr
+	}
+
+	auth.InitAuthCache(ctx)
+
+	userId := uuid.NewString()
+	return dao.DB().CreateUser(
+		ctx,
+		&types.User{
+			ID:       userId,
+			Name:     "test",
+			Email:    userId[:8] + "test@test.com",
+			Password: "test",
+			OrgID:    org.ID,
+			GroupID:  group.ID,
+		},
+		true,
+	)
+}
+
 type TestAvailableIntegrationsRepo struct{}

 func (t *TestAvailableIntegrationsRepo) list(
@@ -25,12 +25,12 @@ import (
 type LogParsingPipelineController struct {
 	Repo

-	GetIntegrationPipelines func(context.Context) ([]pipelinetypes.GettablePipeline, *model.ApiError)
+	GetIntegrationPipelines func(context.Context, string) ([]pipelinetypes.GettablePipeline, *model.ApiError)
 }

 func NewLogParsingPipelinesController(
 	sqlStore sqlstore.SQLStore,
-	getIntegrationPipelines func(context.Context) ([]pipelinetypes.GettablePipeline, *model.ApiError),
+	getIntegrationPipelines func(context.Context, string) ([]pipelinetypes.GettablePipeline, *model.ApiError),
 ) (*LogParsingPipelineController, error) {
 	repo := NewRepo(sqlStore)
 	return &LogParsingPipelineController{
@@ -164,7 +164,7 @@ func (ic *LogParsingPipelineController) getEffectivePipelinesByVersion(
 		result = savedPipelines
 	}

-	integrationPipelines, apiErr := ic.GetIntegrationPipelines(ctx)
+	integrationPipelines, apiErr := ic.GetIntegrationPipelines(ctx, defaultOrgID)
 	if apiErr != nil {
 		return nil, model.WrapApiError(
 			apiErr, "could not get pipelines for installed integrations",
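A hedged aside, not part of the diff: once the field is typed func(context.Context, string), the integrations controller's GetPipelinesForInstalledIntegrations method can be passed directly as a method value. A wiring sketch under stated assumptions; the logparsingpipeline import path and this helper are illustrative, not the repo's actual call site.

package sketch

import (
	"github.com/SigNoz/signoz/pkg/query-service/app/integrations"
	"github.com/SigNoz/signoz/pkg/query-service/app/logparsingpipeline" // assumed path
	"github.com/SigNoz/signoz/pkg/sqlstore"
)

func newPipelinesController(store sqlstore.SQLStore, ic *integrations.Controller) (*logparsingpipeline.LogParsingPipelineController, error) {
	// The (ctx, orgId) method shape now matches the constructor's parameter
	// type, so the method value satisfies it without an adapter closure.
	return logparsingpipeline.NewLogParsingPipelinesController(
		store,
		ic.GetPipelinesForInstalledIntegrations,
	)
}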
@@ -1,4 +1,4 @@
-package model
+package smart

 type SpanForTraceDetails struct {
 	TimeUnixNano uint64 `json:"timestamp"`
@@ -15,8 +15,3 @@ type SpanForTraceDetails struct {
 	HasError bool                   `json:"hasError"`
 	Children []*SpanForTraceDetails `json:"children"`
 }
-
-type GetSpansSubQueryDBResponse struct {
-	SpanID  string `ch:"spanID"`
-	TraceID string `ch:"traceID"`
-}
@@ -1,17 +1,16 @@
-package db
+package smart

 import (
 	"errors"
 	"strconv"

-	"github.com/SigNoz/signoz/ee/query-service/model"
 	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
 	"go.uber.org/zap"
 )

 // SmartTraceAlgorithm is an algorithm to find the target span and build a tree of spans around it with the given levelUp and levelDown parameters and the given spanLimit
 func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanId string, levelUp int, levelDown int, spanLimit int) ([]basemodel.SearchSpansResult, error) {
-	var spans []*model.SpanForTraceDetails
+	var spans []*SpanForTraceDetails

 	// if targetSpanId is null or not present then randomly select a span as targetSpanId
 	if (targetSpanId == "" || targetSpanId == "null") && len(payload) > 0 {
@@ -24,7 +23,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 		if len(spanItem.References) > 0 && spanItem.References[0].RefType == "CHILD_OF" {
 			parentID = spanItem.References[0].SpanId
 		}
-		span := &model.SpanForTraceDetails{
+		span := &SpanForTraceDetails{
 			TimeUnixNano: spanItem.TimeUnixNano,
 			SpanID:       spanItem.SpanID,
 			TraceID:      spanItem.TraceID,
@@ -45,7 +44,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 	if err != nil {
 		return nil, err
 	}
-	targetSpan := &model.SpanForTraceDetails{}
+	targetSpan := &SpanForTraceDetails{}

 	// Find the target span in the span trees
 	for _, root := range roots {
@@ -65,7 +64,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 	}

 	// Build the final result
-	parents := []*model.SpanForTraceDetails{}
+	parents := []*SpanForTraceDetails{}

 	// Get the parent spans of the target span up to the given levelUp parameter and spanLimit
 	preParent := targetSpan
@@ -90,11 +89,11 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 	}

 	// Get the child spans of the target span until the given levelDown and spanLimit
-	preParents := []*model.SpanForTraceDetails{targetSpan}
-	children := []*model.SpanForTraceDetails{}
+	preParents := []*SpanForTraceDetails{targetSpan}
+	children := []*SpanForTraceDetails{}

 	for i := 0; i < levelDown && len(preParents) != 0 && spanLimit > 0; i++ {
-		parents := []*model.SpanForTraceDetails{}
+		parents := []*SpanForTraceDetails{}
 		for _, parent := range preParents {
 			if spanLimit-len(parent.Children) <= 0 {
 				children = append(children, parent.Children[:spanLimit]...)
@@ -108,7 +107,7 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 	}

 	// Store the final list of spans in the resultSpanSet map to avoid duplicates
-	resultSpansSet := make(map[*model.SpanForTraceDetails]struct{})
+	resultSpansSet := make(map[*SpanForTraceDetails]struct{})
 	resultSpansSet[targetSpan] = struct{}{}
 	for _, parent := range parents {
 		resultSpansSet[parent] = struct{}{}
@@ -169,12 +168,12 @@ func SmartTraceAlgorithm(payload []basemodel.SearchSpanResponseItem, targetSpanI
 }

 // buildSpanTrees builds trees of spans from a list of spans.
-func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTraceDetails, error) {
+func buildSpanTrees(spansPtr *[]*SpanForTraceDetails) ([]*SpanForTraceDetails, error) {

 	// Build a map of spanID to span for fast lookup
-	var roots []*model.SpanForTraceDetails
+	var roots []*SpanForTraceDetails
 	spans := *spansPtr
-	mapOfSpans := make(map[string]*model.SpanForTraceDetails, len(spans))
+	mapOfSpans := make(map[string]*SpanForTraceDetails, len(spans))

 	for _, span := range spans {
 		if span.ParentID == "" {
@@ -206,8 +205,8 @@ func buildSpanTrees(spansPtr *[]*model.SpanForTraceDetails) ([]*model.SpanForTra
 }

 // breadthFirstSearch performs a breadth-first search on the span tree to find the target span.
-func breadthFirstSearch(spansPtr *model.SpanForTraceDetails, targetId string) (*model.SpanForTraceDetails, error) {
-	queue := []*model.SpanForTraceDetails{spansPtr}
+func breadthFirstSearch(spansPtr *SpanForTraceDetails, targetId string) (*SpanForTraceDetails, error) {
+	queue := []*SpanForTraceDetails{spansPtr}
 	visited := make(map[string]bool)

 	for len(queue) > 0 {
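A hedged usage sketch for the relocated smart package, not part of the diff: the import path below is an assumption and the level/limit arguments are illustrative. It shows how a caller would feed the raw span items for a trace into SmartTraceAlgorithm and get back a trimmed result.

package sketch

import (
	"github.com/SigNoz/signoz/ee/query-service/app/db/smart" // assumed path
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
)

// trimTrace keeps up to two ancestor levels and two descendant levels around
// the target span, capped at 100 spans overall.
func trimTrace(spans []basemodel.SearchSpanResponseItem, targetSpanID string) ([]basemodel.SearchSpansResult, error) {
	return smart.SmartTraceAlgorithm(spans, targetSpanID, 2, 2, 100)
}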
@@ -439,7 +439,7 @@ func RegisterFirstUser(ctx context.Context, req *RegisterRequest) (*types.User,
 	}

 	user := &types.User{
-		ID:       uuid.NewString(),
+		ID:       uuid.New().String(),
 		Name:     req.Name,
 		Email:    req.Email,
 		Password: hash,
@@ -519,7 +519,7 @@ func RegisterInvitedUser(ctx context.Context, req *RegisterRequest, nopassword b
 	}

 	user := &types.User{
-		ID:       uuid.NewString(),
+		ID:       uuid.New().String(),
 		Name:     req.Name,
 		Email:    req.Email,
 		Password: hash,
@@ -3,6 +3,7 @@ package auth
 import (
 	"context"

+	errorsV2 "github.com/SigNoz/signoz/pkg/errors"
 	"github.com/SigNoz/signoz/pkg/query-service/constants"
 	"github.com/SigNoz/signoz/pkg/query-service/dao"
 	"github.com/SigNoz/signoz/pkg/types"
@@ -51,7 +52,7 @@ func InitAuthCache(ctx context.Context) error {
 func GetUserFromReqContext(ctx context.Context) (*types.GettableUser, error) {
 	claims, ok := authtypes.ClaimsFromContext(ctx)
 	if !ok {
-		return nil, errors.New("no claims found in context")
+		return nil, errorsV2.New(errorsV2.TypeInvalidInput, errorsV2.CodeInvalidInput, "no claims found in context")
 	}

 	user := &types.GettableUser{
@@ -68,10 +68,6 @@ func UseMetricsPreAggregation() bool {
 	return GetOrDefaultEnv("USE_METRICS_PRE_AGGREGATION", "true") == "true"
 }

-func EnableHostsInfraMonitoring() bool {
-	return GetOrDefaultEnv("ENABLE_INFRA_METRICS", "true") == "true"
-}
-
 var KafkaSpanEval = GetOrDefaultEnv("KAFKA_SPAN_EVAL", "false")

 var DEFAULT_FEATURE_SET = model.FeatureSet{
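A hedged aside, not part of the diff: the toggles above all go through a GetOrDefaultEnv helper. A minimal sketch of what such a helper typically looks like follows; the repo's actual implementation may differ.

package sketch

import "os"

// GetOrDefaultEnv returns the value of an environment variable, falling back
// to the given default when the variable is unset or empty.
func GetOrDefaultEnv(key string, fallback string) string {
	v := os.Getenv(key)
	if len(v) == 0 {
		return fallback
	}
	return v
}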
@@ -32,13 +32,7 @@ func (fm *FeatureManager) CheckFeature(featureKey string) error {

 // GetFeatureFlags returns current features
 func (fm *FeatureManager) GetFeatureFlags() (model.FeatureSet, error) {
-	features := append(constants.DEFAULT_FEATURE_SET, model.Feature{
-		Name:       model.OSS,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	})
+	features := constants.DEFAULT_FEATURE_SET
 	return features, nil
 }

@@ -39,6 +39,7 @@ type Reader interface {
 	GetNextPrevErrorIDs(ctx context.Context, params *model.GetErrorParams) (*model.NextPrevErrorIDs, *model.ApiError)

 	// Search Interfaces
+	SearchTraces(ctx context.Context, params *model.SearchTracesParams) (*[]model.SearchSpansResult, error)
 	GetWaterfallSpansForTraceWithMetadata(ctx context.Context, traceID string, req *model.GetWaterfallSpansForTraceWithMetadataParams) (*model.GetWaterfallSpansForTraceWithMetadataResponse, *model.ApiError)
 	GetFlamegraphSpansForTrace(ctx context.Context, traceID string, req *model.GetFlamegraphSpansForTraceParams) (*model.GetFlamegraphSpansForTraceResponse, *model.ApiError)
@@ -9,58 +9,11 @@ type Feature struct {
 	Route string `db:"route" json:"route"`
 }

-const CustomMetricsFunction = "CUSTOM_METRICS_FUNCTION"
-const DisableUpsell = "DISABLE_UPSELL"
-const OSS = "OSS"
-const QueryBuilderPanels = "QUERY_BUILDER_PANELS"
-const QueryBuilderAlerts = "QUERY_BUILDER_ALERTS"
 const UseSpanMetrics = "USE_SPAN_METRICS"
-const AlertChannelSlack = "ALERT_CHANNEL_SLACK"
-const AlertChannelWebhook = "ALERT_CHANNEL_WEBHOOK"
-const AlertChannelPagerduty = "ALERT_CHANNEL_PAGERDUTY"
-const AlertChannelMsTeams = "ALERT_CHANNEL_MSTEAMS"
-const AlertChannelOpsgenie = "ALERT_CHANNEL_OPSGENIE"
-const AlertChannelEmail = "ALERT_CHANNEL_EMAIL"
 const AnomalyDetection = "ANOMALY_DETECTION"
 const HostsInfraMonitoring = "HOSTS_INFRA_MONITORING"
 const TraceFunnels = "TRACE_FUNNELS"

 var BasicPlan = FeatureSet{
-	Feature{
-		Name:       OSS,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	Feature{
-		Name:       DisableUpsell,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	Feature{
-		Name:       CustomMetricsFunction,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	Feature{
-		Name:       QueryBuilderPanels,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	Feature{
-		Name:       QueryBuilderAlerts,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 	Feature{
 		Name:   UseSpanMetrics,
 		Active: false,
@@ -68,48 +21,6 @@ var BasicPlan = FeatureSet{
 		UsageLimit: -1,
 		Route:      "",
 	},
-	Feature{
-		Name:       AlertChannelSlack,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	Feature{
-		Name:       AlertChannelWebhook,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	Feature{
-		Name:       AlertChannelPagerduty,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	Feature{
-		Name:       AlertChannelOpsgenie,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	Feature{
-		Name:       AlertChannelEmail,
-		Active:     true,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
-	Feature{
-		Name:       AlertChannelMsTeams,
-		Active:     false,
-		Usage:      0,
-		UsageLimit: -1,
-		Route:      "",
-	},
 	Feature{
 		Name:   AnomalyDetection,
 		Active: false,
@@ -35,7 +35,7 @@ func TestAWSIntegrationAccountLifecycle(t *testing.T) {
	)

	// Should be able to generate a connection url from UI - initializing an integration account
-	testAccountConfig := cloudintegrations.AccountConfig{
+	testAccountConfig := types.AccountConfig{
		EnabledRegions: []string{"us-east-1", "us-east-2"},
	}
	connectionUrlResp := testbed.GenerateConnectionUrlFromQS(

@@ -65,8 +65,8 @@ func TestAWSIntegrationAccountLifecycle(t *testing.T) {
	testAWSAccountId := "4563215233"
	agentCheckInResp := testbed.CheckInAsAgentWithQS(
		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
		},
	)
	require.Equal(testAccountId, agentCheckInResp.AccountId)

@@ -91,20 +91,20 @@ func TestAWSIntegrationAccountLifecycle(t *testing.T) {
	require.Equal(testAWSAccountId, accountsListResp2.Accounts[0].CloudAccountId)

	// Should be able to update account config from UI
-	testAccountConfig2 := cloudintegrations.AccountConfig{
+	testAccountConfig2 := types.AccountConfig{
		EnabledRegions: []string{"us-east-2", "us-west-1"},
	}
	latestAccount := testbed.UpdateAccountConfigWithQS(
		"aws", testAccountId, testAccountConfig2,
	)
-	require.Equal(testAccountId, latestAccount.Id)
+	require.Equal(testAccountId, latestAccount.ID.StringValue())
	require.Equal(testAccountConfig2, *latestAccount.Config)

	// The agent should now receive latest account config.
	agentCheckInResp1 := testbed.CheckInAsAgentWithQS(
		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
		},
	)
	require.Equal(testAccountId, agentCheckInResp1.AccountId)

@@ -114,14 +114,14 @@ func TestAWSIntegrationAccountLifecycle(t *testing.T) {
	// Should be able to disconnect/remove account from UI.
	tsBeforeDisconnect := time.Now()
	latestAccount = testbed.DisconnectAccountWithQS("aws", testAccountId)
-	require.Equal(testAccountId, latestAccount.Id)
+	require.Equal(testAccountId, latestAccount.ID.StringValue())
	require.LessOrEqual(tsBeforeDisconnect, *latestAccount.RemovedAt)

	// The agent should receive the disconnected status in account config post disconnection
	agentCheckInResp2 := testbed.CheckInAsAgentWithQS(
		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
		},
	)
	require.Equal(testAccountId, agentCheckInResp2.AccountId)

@@ -157,13 +157,13 @@ func TestAWSIntegrationServices(t *testing.T) {
	testAWSAccountId := "389389489489"
	testbed.CheckInAsAgentWithQS(
		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
		},
	)

-	testSvcConfig := cloudintegrations.CloudServiceConfig{
-		Metrics: &cloudintegrations.CloudServiceMetricsConfig{
+	testSvcConfig := types.CloudServiceConfig{
+		Metrics: &types.CloudServiceMetricsConfig{
			Enabled: true,
		},
	}

@@ -199,7 +199,7 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {
	testbed := NewCloudIntegrationsTestBed(t, nil)

	// configure a connected account
-	testAccountConfig := cloudintegrations.AccountConfig{
+	testAccountConfig := types.AccountConfig{
		EnabledRegions: []string{"us-east-1", "us-east-2"},
	}
	connectionUrlResp := testbed.GenerateConnectionUrlFromQS(

@@ -218,8 +218,8 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {
	testAWSAccountId := "389389489489"
	checkinResp := testbed.CheckInAsAgentWithQS(
		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
		},
	)

@@ -237,14 +237,14 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {

	// helper
	setServiceConfig := func(svcId string, metricsEnabled bool, logsEnabled bool) {
-		testSvcConfig := cloudintegrations.CloudServiceConfig{}
+		testSvcConfig := types.CloudServiceConfig{}
		if metricsEnabled {
-			testSvcConfig.Metrics = &cloudintegrations.CloudServiceMetricsConfig{
+			testSvcConfig.Metrics = &types.CloudServiceMetricsConfig{
				Enabled: metricsEnabled,
			}
		}
		if logsEnabled {
-			testSvcConfig.Logs = &cloudintegrations.CloudServiceLogsConfig{
+			testSvcConfig.Logs = &types.CloudServiceLogsConfig{
				Enabled: logsEnabled,
			}
		}

@@ -262,8 +262,8 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {

	checkinResp = testbed.CheckInAsAgentWithQS(
		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
		},
	)

@@ -292,13 +292,13 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {
	require.True(strings.HasPrefix(logGroupPrefixes[0], "/aws/rds"))

	// change regions and update service configs and validate config changes for agent
-	testAccountConfig2 := cloudintegrations.AccountConfig{
+	testAccountConfig2 := types.AccountConfig{
		EnabledRegions: []string{"us-east-2", "us-west-1"},
	}
	latestAccount := testbed.UpdateAccountConfigWithQS(
		"aws", testAccountId, testAccountConfig2,
	)
-	require.Equal(testAccountId, latestAccount.Id)
+	require.Equal(testAccountId, latestAccount.ID.StringValue())
	require.Equal(testAccountConfig2, *latestAccount.Config)

	// disable metrics for one and logs for the other.

@@ -308,8 +308,8 @@ func TestConfigReturnedWhenAgentChecksIn(t *testing.T) {

	checkinResp = testbed.CheckInAsAgentWithQS(
		"aws", cloudintegrations.AgentCheckInRequest{
-			AccountId:      testAccountId,
-			CloudAccountId: testAWSAccountId,
+			ID:        testAccountId,
+			AccountID: testAWSAccountId,
		},
	)
	require.Equal(testAccountId, checkinResp.AccountId)

@@ -453,8 +453,8 @@ func (tb *CloudIntegrationsTestBed) CheckInAsAgentWithQS(
}

func (tb *CloudIntegrationsTestBed) UpdateAccountConfigWithQS(
-	cloudProvider string, accountId string, newConfig cloudintegrations.AccountConfig,
-) *cloudintegrations.AccountRecord {
+	cloudProvider string, accountId string, newConfig types.AccountConfig,
+) *types.CloudIntegration {
	respDataJson := tb.RequestQS(
		fmt.Sprintf(
			"/api/v1/cloud-integrations/%s/accounts/%s/config",

@@ -464,7 +464,7 @@ func (tb *CloudIntegrationsTestBed) UpdateAccountConfigWithQS(
		},
	)

-	var resp cloudintegrations.AccountRecord
+	var resp types.CloudIntegration
	err := json.Unmarshal(respDataJson, &resp)
	if err != nil {
		tb.t.Fatalf("could not unmarshal apiResponse.Data json into Account")

@@ -475,7 +475,7 @@ func (tb *CloudIntegrationsTestBed) UpdateAccountConfigWithQS(

func (tb *CloudIntegrationsTestBed) DisconnectAccountWithQS(
	cloudProvider string, accountId string,
-) *cloudintegrations.AccountRecord {
+) *types.CloudIntegration {
	respDataJson := tb.RequestQS(
		fmt.Sprintf(
			"/api/v1/cloud-integrations/%s/accounts/%s/disconnect",

@@ -483,7 +483,7 @@ func (tb *CloudIntegrationsTestBed) DisconnectAccountWithQS(
		), map[string]any{},
	)

-	var resp cloudintegrations.AccountRecord
+	var resp types.CloudIntegration
	err := json.Unmarshal(respDataJson, &resp)
	if err != nil {
		tb.t.Fatalf("could not unmarshal apiResponse.Data json into Account")
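The check-in hunks above consistently rename AgentCheckInRequest's AccountId/CloudAccountId fields to ID/AccountID. The struct definition itself is not part of this excerpt; a sketch of the new shape inferred from these call sites (field types and any serialization tags are assumptions):

// Inferred from the test call sites above; the real definition may differ.
type AgentCheckInRequest struct {
	ID        string // SigNoz-side integration account id, e.g. testAccountId
	AccountID string // cloud-provider account id, e.g. "4563215233"
}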
@@ -166,6 +166,7 @@ func createTestUser() (*types.User, *model.ApiError) {
	auth.InitAuthCache(ctx)

+	userId := uuid.NewString()

	return dao.DB().CreateUser(
		ctx,
		&types.User{
@@ -48,9 +48,15 @@ func NewTestSqliteDB(t *testing.T) (sqlStore sqlstore.SQLStore, testDBFilePath s
			sqlmigration.NewModifyDatetimeFactory(),
			sqlmigration.NewModifyOrgDomainFactory(),
			sqlmigration.NewUpdateOrganizationFactory(sqlStore),
			sqlmigration.NewAddAlertmanagerFactory(sqlStore),
			sqlmigration.NewUpdateDashboardAndSavedViewsFactory(sqlStore),
			sqlmigration.NewUpdatePatAndOrgDomainsFactory(sqlStore),
			sqlmigration.NewUpdatePipelines(sqlStore),
			sqlmigration.NewDropLicensesSitesFactory(sqlStore),
			sqlmigration.NewUpdateInvitesFactory(sqlStore),
			sqlmigration.NewUpdatePatFactory(sqlStore),
			sqlmigration.NewAddVirtualFieldsFactory(),
			sqlmigration.NewUpdateIntegrationsFactory(sqlStore),
		),
	)
	if err != nil {
@@ -69,6 +69,8 @@ func NewSQLMigrationProviderFactories(sqlstore sqlstore.SQLStore) factory.NamedM
		sqlmigration.NewUpdatePreferencesFactory(sqlstore),
		sqlmigration.NewUpdateApdexTtlFactory(sqlstore),
		sqlmigration.NewUpdateResetPasswordFactory(sqlstore),
+		sqlmigration.NewAddVirtualFieldsFactory(),
+		sqlmigration.NewUpdateIntegrationsFactory(sqlstore),
	)
}
pkg/sqlmigration/025_add_virtual_fields.go (new file, 58 lines)
@@ -0,0 +1,58 @@
package sqlmigration

import (
	"context"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
)

type addVirtualFields struct{}

func NewAddVirtualFieldsFactory() factory.ProviderFactory[SQLMigration, Config] {
	return factory.NewProviderFactory(factory.MustNewName("add_virtual_fields"), newAddVirtualFields)
}

func newAddVirtualFields(_ context.Context, _ factory.ProviderSettings, _ Config) (SQLMigration, error) {
	return &addVirtualFields{}, nil
}

func (migration *addVirtualFields) Register(migrations *migrate.Migrations) error {
	if err := migrations.Register(migration.Up, migration.Down); err != nil {
		return err
	}

	return nil
}

func (migration *addVirtualFields) Up(ctx context.Context, db *bun.DB) error {
	// table:virtual_field op:create
	if _, err := db.NewCreateTable().
		Model(&struct {
			bun.BaseModel `bun:"table:virtual_field"`

			types.Identifiable
			types.TimeAuditable
			types.UserAuditable

			Name        string                `bun:"name,type:text,notnull"`
			Expression  string                `bun:"expression,type:text,notnull"`
			Description string                `bun:"description,type:text"`
			Signal      telemetrytypes.Signal `bun:"signal,type:text,notnull"`
			OrgID       string                `bun:"org_id,type:text,notnull"`
		}{}).
		ForeignKey(`("org_id") REFERENCES "organizations" ("id") ON DELETE CASCADE`).
		IfNotExists().
		Exec(ctx); err != nil {
		return err
	}

	return nil
}

func (migration *addVirtualFields) Down(ctx context.Context, db *bun.DB) error {
	return nil
}
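Note that the migration declares the table with an anonymous inline struct, so the schema recorded here stays frozen even if application types later evolve; Down is intentionally a no-op. Application code reading the table would typically mirror the same schema in a named model, roughly as follows (a hypothetical sketch, not part of this diff):

// Hypothetical runtime model mirroring the table created above.
type VirtualField struct {
	bun.BaseModel `bun:"table:virtual_field"`

	types.Identifiable
	types.TimeAuditable
	types.UserAuditable

	Name        string                `bun:"name,type:text,notnull"`
	Expression  string                `bun:"expression,type:text,notnull"`
	Description string                `bun:"description,type:text"`
	Signal      telemetrytypes.Signal `bun:"signal,type:text,notnull"`
	OrgID       string                `bun:"org_id,type:text,notnull"`
}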
pkg/sqlmigration/026_update_integrations.go (new file, 421 lines)
@@ -0,0 +1,421 @@
package sqlmigration

import (
	"context"
	"database/sql"
	"time"

	eeTypes "github.com/SigNoz/signoz/ee/types"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/google/uuid"
	"github.com/uptrace/bun"
	"github.com/uptrace/bun/migrate"
	"go.uber.org/zap"
)

type updateIntegrations struct {
	store sqlstore.SQLStore
}

func NewUpdateIntegrationsFactory(sqlstore sqlstore.SQLStore) factory.ProviderFactory[SQLMigration, Config] {
	return factory.NewProviderFactory(factory.MustNewName("update_integrations"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
		return newUpdateIntegrations(ctx, ps, c, sqlstore)
	})
}

func newUpdateIntegrations(_ context.Context, _ factory.ProviderSettings, _ Config, store sqlstore.SQLStore) (SQLMigration, error) {
	return &updateIntegrations{
		store: store,
	}, nil
}

func (migration *updateIntegrations) Register(migrations *migrate.Migrations) error {
	if err := migrations.Register(migration.Up, migration.Down); err != nil {
		return err
	}

	return nil
}

type existingInstalledIntegration struct {
	bun.BaseModel `bun:"table:integrations_installed"`

	IntegrationID string    `bun:"integration_id,pk,type:text"`
	ConfigJSON    string    `bun:"config_json,type:text"`
	InstalledAt   time.Time `bun:"installed_at,default:current_timestamp"`
}

type newInstalledIntegration struct {
	bun.BaseModel `bun:"table:installed_integration"`

	types.Identifiable
	Type        string    `json:"type" bun:"type,type:text,unique:org_id_type"`
	Config      string    `json:"config" bun:"config,type:text"`
	InstalledAt time.Time `json:"installed_at" bun:"installed_at,default:current_timestamp"`
	OrgID       string    `json:"org_id" bun:"org_id,type:text,unique:org_id_type"`
}

type existingCloudIntegration struct {
	bun.BaseModel `bun:"table:cloud_integrations_accounts"`

	CloudProvider       string     `bun:"cloud_provider,type:text,unique:cloud_provider_id"`
	ID                  string     `bun:"id,type:text,notnull,unique:cloud_provider_id"`
	ConfigJSON          string     `bun:"config_json,type:text"`
	CloudAccountID      string     `bun:"cloud_account_id,type:text"`
	LastAgentReportJSON string     `bun:"last_agent_report_json,type:text"`
	CreatedAt           time.Time  `bun:"created_at,notnull,default:current_timestamp"`
	RemovedAt           *time.Time `bun:"removed_at,type:timestamp"`
}

type newCloudIntegration struct {
	bun.BaseModel `bun:"table:cloud_integration"`

	types.Identifiable
	types.TimeAuditable
	Provider        string     `json:"provider" bun:"provider,type:text"`
	Config          string     `json:"config" bun:"config,type:text"`
	AccountID       string     `json:"account_id" bun:"account_id,type:text"`
	LastAgentReport string     `json:"last_agent_report" bun:"last_agent_report,type:text"`
	RemovedAt       *time.Time `json:"removed_at" bun:"removed_at,type:timestamp"`
	OrgID           string     `json:"org_id" bun:"org_id,type:text"`
}

type existingCloudIntegrationService struct {
	bun.BaseModel `bun:"table:cloud_integrations_service_configs,alias:c1"`

	CloudProvider  string    `bun:"cloud_provider,type:text,notnull,unique:service_cloud_provider_account"`
	CloudAccountID string    `bun:"cloud_account_id,type:text,notnull,unique:service_cloud_provider_account"`
	ServiceID      string    `bun:"service_id,type:text,notnull,unique:service_cloud_provider_account"`
	ConfigJSON     string    `bun:"config_json,type:text"`
	CreatedAt      time.Time `bun:"created_at,default:current_timestamp"`
}

type newCloudIntegrationService struct {
	bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`

	types.Identifiable
	types.TimeAuditable
	Type               string `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
	Config             string `bun:"config,type:text"`
	CloudIntegrationID string `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type"`
}

func (migration *updateIntegrations) Up(ctx context.Context, db *bun.DB) error {
	// begin transaction
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// don't run the migration if there are multiple org ids
	orgIDs := make([]string, 0)
	err = migration.store.BunDB().NewSelect().Model((*types.Organization)(nil)).Column("id").Scan(ctx, &orgIDs)
	if err != nil {
		return err
	}
	if len(orgIDs) > 1 {
		return nil
	}

	// ---
	// installed integrations
	// ---
	err = migration.
		store.
		Dialect().
		RenameTableAndModifyModel(ctx, tx, new(existingInstalledIntegration), new(newInstalledIntegration), []string{OrgReference}, func(ctx context.Context) error {
			existingIntegrations := make([]*existingInstalledIntegration, 0)
			err = tx.
				NewSelect().
				Model(&existingIntegrations).
				Scan(ctx)
			if err != nil {
				if err != sql.ErrNoRows {
					return err
				}
			}

			if err == nil && len(existingIntegrations) > 0 {
				newIntegrations := migration.
					CopyOldIntegrationsToNewIntegrations(tx, orgIDs[0], existingIntegrations)
				_, err = tx.
					NewInsert().
					Model(&newIntegrations).
					Exec(ctx)
				if err != nil {
					return err
				}
			}
			return nil
		})
	if err != nil {
		return err
	}

	// ---
	// cloud integrations
	// ---
	err = migration.
		store.
		Dialect().
		RenameTableAndModifyModel(ctx, tx, new(existingCloudIntegration), new(newCloudIntegration), []string{OrgReference}, func(ctx context.Context) error {
			existingIntegrations := make([]*existingCloudIntegration, 0)
			err = tx.
				NewSelect().
				Model(&existingIntegrations).
				Where("removed_at IS NULL"). // we will only copy the accounts that are not removed
				Scan(ctx)
			if err != nil {
				if err != sql.ErrNoRows {
					return err
				}
			}

			if err == nil && len(existingIntegrations) > 0 {
				newIntegrations := migration.
					CopyOldCloudIntegrationsToNewCloudIntegrations(tx, orgIDs[0], existingIntegrations)
				_, err = tx.
					NewInsert().
					Model(&newIntegrations).
					Exec(ctx)
				if err != nil {
					return err
				}
			}
			return nil
		})
	if err != nil {
		return err
	}

	// add unique constraint to cloud_integration table
	_, err = tx.ExecContext(ctx, `CREATE UNIQUE INDEX IF NOT EXISTS unique_cloud_integration ON cloud_integration (id, provider, org_id)`)
	if err != nil {
		return err
	}

	// ---
	// cloud integration service
	// ---
	err = migration.
		store.
		Dialect().
		RenameTableAndModifyModel(ctx, tx, new(existingCloudIntegrationService), new(newCloudIntegrationService), []string{CloudIntegrationReference}, func(ctx context.Context) error {
			existingServices := make([]*existingCloudIntegrationService, 0)

			// for an account id with the same cloud provider and service_id,
			// we will get the latest created service using the created_at column
			err = tx.
				NewSelect().
				Model(&existingServices).
				Where("created_at = (SELECT MAX(created_at) FROM cloud_integrations_service_configs c2 WHERE c2.cloud_provider = c1.cloud_provider AND c2.cloud_account_id = c1.cloud_account_id AND c2.service_id = c1.service_id)").
				OrderExpr("c1.cloud_provider, c1.cloud_account_id, c1.service_id").
				Scan(ctx)
			if err != nil {
				if err != sql.ErrNoRows {
					return err
				}
			}

			if err == nil && len(existingServices) > 0 {
				newServices := migration.
					CopyOldCloudIntegrationServicesToNewCloudIntegrationServices(tx, orgIDs[0], existingServices)
				_, err = tx.
					NewInsert().
					Model(&newServices).
					Exec(ctx)
				if err != nil {
					return err
				}
			}
			return nil
		})
	if err != nil {
		return err
	}

	if len(orgIDs) == 0 {
		err = tx.Commit()
		if err != nil {
			return err
		}
		return nil
	}

	// copy the old aws integration user to the new user
	err = migration.copyOldAwsIntegrationUser(tx, orgIDs[0])
	if err != nil {
		return err
	}

	err = tx.Commit()
	if err != nil {
		return err
	}

	return nil
}

func (migration *updateIntegrations) Down(ctx context.Context, db *bun.DB) error {
	return nil
}

func (migration *updateIntegrations) CopyOldIntegrationsToNewIntegrations(tx bun.IDB, orgID string, existingIntegrations []*existingInstalledIntegration) []*newInstalledIntegration {
	newIntegrations := make([]*newInstalledIntegration, 0)

	for _, integration := range existingIntegrations {
		newIntegrations = append(newIntegrations, &newInstalledIntegration{
			Identifiable: types.Identifiable{
				ID: valuer.GenerateUUID(),
			},
			Type:        integration.IntegrationID,
			Config:      integration.ConfigJSON,
			InstalledAt: integration.InstalledAt,
			OrgID:       orgID,
		})
	}

	return newIntegrations
}

func (migration *updateIntegrations) CopyOldCloudIntegrationsToNewCloudIntegrations(tx bun.IDB, orgID string, existingIntegrations []*existingCloudIntegration) []*newCloudIntegration {
	newIntegrations := make([]*newCloudIntegration, 0)

	for _, integration := range existingIntegrations {
		newIntegrations = append(newIntegrations, &newCloudIntegration{
			Identifiable: types.Identifiable{
				ID: valuer.GenerateUUID(),
			},
			TimeAuditable: types.TimeAuditable{
				CreatedAt: integration.CreatedAt,
				UpdatedAt: integration.CreatedAt,
			},
			Provider:        integration.CloudProvider,
			AccountID:       integration.CloudAccountID,
			Config:          integration.ConfigJSON,
			LastAgentReport: integration.LastAgentReportJSON,
			RemovedAt:       integration.RemovedAt,
			OrgID:           orgID,
		})
	}

	return newIntegrations
}

func (migration *updateIntegrations) CopyOldCloudIntegrationServicesToNewCloudIntegrationServices(tx bun.IDB, orgID string, existingServices []*existingCloudIntegrationService) []*newCloudIntegrationService {
	newServices := make([]*newCloudIntegrationService, 0)

	// existing services are already deduplicated: for an account id with the
	// same cloud provider and service_id, only the latest created service
	// (by the created_at column) was selected
	for _, service := range existingServices {
		var cloudIntegrationID string
		err := tx.NewSelect().
			Model((*newCloudIntegration)(nil)).
			Column("id").
			Where("account_id = ?", service.CloudAccountID).
			Where("provider = ?", service.CloudProvider).
			Where("org_id = ?", orgID).
			Scan(context.Background(), &cloudIntegrationID)
		if err != nil {
			if err == sql.ErrNoRows {
				continue
			}
			zap.L().Error("failed to get cloud integration id", zap.Error(err))
			return nil
		}
		newServices = append(newServices, &newCloudIntegrationService{
			Identifiable: types.Identifiable{
				ID: valuer.GenerateUUID(),
			},
			TimeAuditable: types.TimeAuditable{
				CreatedAt: service.CreatedAt,
				UpdatedAt: service.CreatedAt,
			},
			Type:               service.ServiceID,
			Config:             service.ConfigJSON,
			CloudIntegrationID: cloudIntegrationID,
		})
	}

	return newServices
}

func (migration *updateIntegrations) copyOldAwsIntegrationUser(tx bun.IDB, orgID string) error {
	user := &types.User{}
	err := tx.NewSelect().Model(user).Where("email = ?", "aws-integration@signoz.io").Scan(context.Background())
	if err != nil {
		if err == sql.ErrNoRows {
			return nil
		}
		return err
	}

	// check if the id is already a UUID
	if _, err := uuid.Parse(user.ID); err == nil {
		return nil
	}

	// new user
	newUser := &types.User{
		ID: uuid.New().String(),
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		},
		OrgID:    orgID,
		Name:     user.Name,
		Email:    user.Email,
		GroupID:  user.GroupID,
		Password: user.Password,
	}

	// get the pat for the old user
	pat := &eeTypes.StorablePersonalAccessToken{}
	err = tx.NewSelect().Model(pat).Where("user_id = ? and revoked = false", "aws-integration").Scan(context.Background())
	if err != nil {
		return err
	}

	// new pat
	newPAT := &eeTypes.StorablePersonalAccessToken{
		Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
			UpdatedAt: time.Now(),
		},
		OrgID:     orgID,
		UserID:    newUser.ID,
		Token:     pat.Token,
		Name:      pat.Name,
		ExpiresAt: pat.ExpiresAt,
		LastUsed:  pat.LastUsed,
		Revoked:   pat.Revoked,
		Role:      pat.Role,
	}

	// delete the old user
	_, err = tx.ExecContext(context.Background(), `DELETE FROM users WHERE id = ?`, user.ID)
	if err != nil {
		return err
	}

	// insert the new user
	_, err = tx.NewInsert().Model(newUser).Exec(context.Background())
	if err != nil {
		return err
	}

	// insert the new pat
	_, err = tx.NewInsert().Model(newPAT).Exec(context.Background())
	if err != nil {
		return err
	}

	return nil
}
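The migration above leans on the dialect helper RenameTableAndModifyModel three times; only a fragment of its body appears in a hunk further below. From the call sites, its signature is roughly the following (a sketch inferred from usage; exact parameter names, types, and behavior may differ):

// Approximate signature, inferred from the three call sites in 026 above.
// It presumably renames the old table, recreates it from the new model with
// the foreign keys implied by `references`, and invokes `cb` to copy data.
func (dialect *dialect) RenameTableAndModifyModel(
	ctx context.Context,
	bun bun.IDB,
	oldModel, newModel any,
	references []string, // e.g. []string{OrgReference} or []string{CloudIntegrationReference}
	cb func(context.Context) error,
) error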
@@ -26,8 +26,9 @@ var (
)

var (
-	OrgReference  = "org"
-	UserReference = "user"
+	OrgReference              = "org"
+	UserReference             = "user"
+	CloudIntegrationReference = "cloud_integration"
)

func New(
@@ -17,13 +17,15 @@ var (
)

var (
-	Org  = "org"
-	User = "user"
+	Org              = "org"
+	User             = "user"
+	CloudIntegration = "cloud_integration"
)

var (
-	OrgReference  = `("org_id") REFERENCES "organizations" ("id")`
-	UserReference = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
+	OrgReference              = `("org_id") REFERENCES "organizations" ("id")`
+	UserReference             = `("user_id") REFERENCES "users" ("id") ON DELETE CASCADE ON UPDATE CASCADE`
+	CloudIntegrationReference = `("cloud_integration_id") REFERENCES "cloud_integration" ("id") ON DELETE CASCADE`
)

type dialect struct {
@@ -202,6 +204,8 @@ func (dialect *dialect) RenameTableAndModifyModel(ctx context.Context, bun bun.I
			fkReferences = append(fkReferences, OrgReference)
		} else if reference == User && !slices.Contains(fkReferences, UserReference) {
			fkReferences = append(fkReferences, UserReference)
+		} else if reference == CloudIntegration && !slices.Contains(fkReferences, CloudIntegrationReference) {
+			fkReferences = append(fkReferences, CloudIntegrationReference)
		}
	}
pkg/telemetrylogs/condition_builder.go (new file, 275 lines)
@@ -0,0 +1,275 @@
package telemetrylogs

import (
	"context"
	"fmt"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"

	"github.com/huandu/go-sqlbuilder"
)

var (
	logsV2Columns = map[string]*schema.Column{
		"ts_bucket_start":      {Name: "ts_bucket_start", Type: schema.ColumnTypeUInt64},
		"resource_fingerprint": {Name: "resource_fingerprint", Type: schema.ColumnTypeString},

		"timestamp":          {Name: "timestamp", Type: schema.ColumnTypeUInt64},
		"observed_timestamp": {Name: "observed_timestamp", Type: schema.ColumnTypeUInt64},
		"id":                 {Name: "id", Type: schema.ColumnTypeString},
		"trace_id":           {Name: "trace_id", Type: schema.ColumnTypeString},
		"span_id":            {Name: "span_id", Type: schema.ColumnTypeString},
		"trace_flags":        {Name: "trace_flags", Type: schema.ColumnTypeUInt32},
		"severity_text":      {Name: "severity_text", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"severity_number":    {Name: "severity_number", Type: schema.ColumnTypeUInt8},
		"body":               {Name: "body", Type: schema.ColumnTypeString},
		"attributes_string": {Name: "attributes_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes_number": {Name: "attributes_number", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}},
		"attributes_bool": {Name: "attributes_bool", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}},
		"resources_string": {Name: "resources_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"scope_name":    {Name: "scope_name", Type: schema.ColumnTypeString},
		"scope_version": {Name: "scope_version", Type: schema.ColumnTypeString},
		"scope_string": {Name: "scope_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
	}
)

var _ qbtypes.ConditionBuilder = &conditionBuilder{}

type conditionBuilder struct {
}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {
	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return logsV2Columns["resources_string"], nil
	case telemetrytypes.FieldContextScope:
		switch key.Name {
		case "name", "scope.name", "scope_name":
			return logsV2Columns["scope_name"], nil
		case "version", "scope.version", "scope_version":
			return logsV2Columns["scope_version"], nil
		}
		return logsV2Columns["scope_string"], nil
	case telemetrytypes.FieldContextAttribute:
		switch key.FieldDataType {
		case telemetrytypes.FieldDataTypeString:
			return logsV2Columns["attributes_string"], nil
		case telemetrytypes.FieldDataTypeInt64, telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeNumber:
			return logsV2Columns["attributes_number"], nil
		case telemetrytypes.FieldDataTypeBool:
			return logsV2Columns["attributes_bool"], nil
		}
	case telemetrytypes.FieldContextLog:
		col, ok := logsV2Columns[key.Name]
		if !ok {
			return nil, qbtypes.ErrColumnNotFound
		}
		return col, nil
	}

	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.ColumnTypeString,
		schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		schema.ColumnTypeUInt64,
		schema.ColumnTypeUInt32,
		schema.ColumnTypeUInt8:
		return column.Name, nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}:
		// a key could have been materialized; if so, return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeFloat64,
	}:
		// a key could have been materialized; if so, return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeBool,
	}:
		// a key could have been materialized; if so, return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	// should not reach here
	return column.Name, nil
}

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName)

	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.E(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.NE(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThan:
		return sb.G(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThanOrEq:
		return sb.GE(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThan:
		return sb.LT(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThanOrEq:
		return sb.LE(tblFieldName, value), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.Like(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.NotLike(tblFieldName, value), nil
	case qbtypes.FilterOperatorILike:
		return sb.ILike(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.NotILike(tblFieldName, value), nil

	case qbtypes.FilterOperatorContains:
		return sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil

	// between and not between
	case qbtypes.FilterOperatorBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.Between(tblFieldName, values[0], values[1]), nil
	case qbtypes.FilterOperatorNotBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.NotBetween(tblFieldName, values[0], values[1]), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.In(tblFieldName, values...), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.NotIn(tblFieldName, values...), nil

	// exists and not exists
	// but how could you live and have no story to tell
	// in the UI based query builder, `exists` and `not exists` are used for
	// key membership checks, so depending on the column type, the condition changes
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		var value any
		switch column.Type {
		case schema.ColumnTypeString, schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}:
			value = ""
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.ColumnTypeUInt64, schema.ColumnTypeUInt32, schema.ColumnTypeUInt8:
			value = 0
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if key.Materialized {
				leftOperand = telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
			}
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			} else {
				return sb.NE(leftOperand, true), nil
			}
		default:
			return "", fmt.Errorf("exists operator is not supported for column type %s", column.Type)
		}
	}
	return "", fmt.Errorf("unsupported operator: %v", operator)
}
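Taken together with go-sqlbuilder, the builder produces parameterized ClickHouse WHERE clauses; a compact usage sketch mirroring what the tests below exercise (the table name is shown for illustration):

sb := sqlbuilder.NewSelectBuilder()
sb.Select("timestamp", "body").From("signoz_logs.distributed_logs_v2")

key := &telemetrytypes.TelemetryFieldKey{
	Name:          "user.id",
	FieldContext:  telemetrytypes.FieldContextAttribute,
	FieldDataType: telemetrytypes.FieldDataTypeString,
}
cond, err := NewConditionBuilder().GetCondition(context.Background(), key, qbtypes.FilterOperatorEqual, "admin", sb)
if err != nil {
	// handle the error
}
sb.Where(cond)

// sql contains: ... WHERE attributes_string['user.id'] = ?   with args ["admin"]
sql, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
_ = sql
_ = args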
pkg/telemetrylogs/condition_builder_test.go (new file, 620 lines)
@@ -0,0 +1,620 @@
package telemetrylogs

import (
	"context"
	"testing"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetColumn(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		expectedCol   *schema.Column
		expectedError error
	}{
		{
			name: "Resource field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedCol:   logsV2Columns["resources_string"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope.name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope.name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope_name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope_name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_name"],
			expectedError: nil,
		},
		{
			name: "Scope field - version",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "version",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_version"],
			expectedError: nil,
		},
		{
			name: "Scope field - other scope field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "custom.scope.field",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   logsV2Columns["scope_string"],
			expectedError: nil,
		},
		{
			name: "Attribute field - string type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedCol:   logsV2Columns["attributes_string"],
			expectedError: nil,
		},
		{
			name: "Attribute field - number type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - int64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeInt64,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - float64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "cpu.utilization",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			expectedCol:   logsV2Columns["attributes_number"],
			expectedError: nil,
		},
		{
			name: "Attribute field - bool type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedCol:   logsV2Columns["attributes_bool"],
			expectedError: nil,
		},
		{
			name: "Log field - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   logsV2Columns["timestamp"],
			expectedError: nil,
		},
		{
			name: "Log field - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   logsV2Columns["body"],
			expectedError: nil,
		},
		{
			name: "Log field - nonexistent",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "did_user_login",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "did_user_login",
				Signal:        telemetrytypes.SignalLogs,
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedCol:   logsV2Columns["attributes_bool"],
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			col, err := conditionBuilder.GetColumn(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedCol, col)
			}
		})
	}
}

func TestGetFieldKeyName(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := &conditionBuilder{}

	testCases := []struct {
		name           string
		key            telemetrytypes.TelemetryFieldKey
		expectedResult string
		expectedError  error
	}{
		{
			name: "Simple column type - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "timestamp",
			expectedError:  nil,
		},
		{
			name: "Map column type - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedResult: "attributes_string['user.id']",
			expectedError:  nil,
		},
		{
			name: "Map column type - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedResult: "attributes_number['request.size']",
			expectedError:  nil,
		},
		{
			name: "Map column type - bool attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedResult: "attributes_bool['request.success']",
			expectedError:  nil,
		},
		{
			name: "Map column type - resource attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedResult: "resources_string['service.name']",
			expectedError:  nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "",
			expectedError:  qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedResult, result)
			}
		})
	}
}

func TestGetCondition(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Equal operator - string",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "error message",
			expectedSQL:   "body = ?",
			expectedError: nil,
		},
		{
			name: "Not Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotEqual,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <> ?",
			expectedError: nil,
		},
		{
			name: "Greater Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorGreaterThan,
			value:         float64(100),
			expectedSQL:   "attributes_number['request.duration'] > ?",
			expectedError: nil,
		},
		{
			name: "Less Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorLessThan,
			value:         float64(1024),
			expectedSQL:   "attributes_number['request.size'] < ?",
			expectedError: nil,
		},
		{
			name: "Greater Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorGreaterThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp >= ?",
			expectedError: nil,
		},
		{
			name: "Less Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorLessThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <= ?",
			expectedError: nil,
		},
		{
			name: "Like operator - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorLike,
			value:         "%error%",
			expectedSQL:   "body LIKE ?",
			expectedError: nil,
		},
		{
			name: "Not Like operator - body",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotLike,
			value:         "%error%",
			expectedSQL:   "body NOT LIKE ?",
			expectedError: nil,
		},
		{
			name: "ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Not ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) NOT LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Contains operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorContains,
			value:         "admin",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "Between operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         "invalid",
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Between operator - insufficient values",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000)},
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Not Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp NOT BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "In operator - severity_text",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         []any{"error", "fatal", "critical"},
			expectedSQL:   "severity_text IN (?, ?, ?)",
			expectedError: nil,
		},
		{
			name: "In operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorIn,
			value:         "error",
			expectedSQL:   "",
			expectedError: qbtypes.ErrInValues,
		},
		{
			name: "Not In operator - severity_text",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "severity_text",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotIn,
			value:         []any{"debug", "info", "trace"},
			expectedSQL:   "severity_text NOT IN (?, ?, ?)",
			expectedError: nil,
		},
		{
			name: "Exists operator - string field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "body <> ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - string field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "body",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "body = ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - number field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "timestamp <> ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') = ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') <> ?",
			expectedError: nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "value",
			expectedSQL:   "",
			expectedError: qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
func TestGetConditionMultiple(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		keys          []*telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Equal operator - string",
			keys: []*telemetrytypes.TelemetryFieldKey{
				{
					Name:         "body",
					FieldContext: telemetrytypes.FieldContextLog,
				},
				{
					Name:         "severity_text",
					FieldContext: telemetrytypes.FieldContextLog,
				},
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "error message",
			expectedSQL:   "body = ? AND severity_text = ?",
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			var err error
			for _, key := range tc.keys {
				// assign with `=` rather than `:=` so err is not shadowed
				// inside the loop and the assertions below actually see it
				var cond string
				cond, err = conditionBuilder.GetCondition(ctx, key, tc.operator, tc.value, sb)
				if err != nil {
					t.Fatalf("Error getting condition for key %s: %v", key.Name, err)
				}
				sb.Where(cond)
			}

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
9
pkg/telemetrylogs/tables.go
Normal file
@@ -0,0 +1,9 @@
package telemetrylogs

const (
	DBName                        = "signoz_logs"
	LogsV2TableName               = "distributed_logs_v2"
	LogsV2LocalTableName          = "logs_v2"
	TagAttributesV2TableName      = "distributed_tag_attributes_v2"
	TagAttributesV2LocalTableName = "tag_attributes_v2"
)
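
These constants are consumed as `<database>.<table>` pairs when the metastore builds FROM clauses. A minimal sketch of that usage, assuming the packages in this changeset (illustrative only, not part of the diff):

    sb := sqlbuilder.NewSelectBuilder()
    // e.g. FROM signoz_logs.distributed_logs_v2
    sb.From(telemetrylogs.DBName + "." + telemetrylogs.LogsV2TableName)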
149
pkg/telemetrymetadata/condition_builder.go
Normal file
@@ -0,0 +1,149 @@
package telemetrymetadata

import (
	"context"
	"fmt"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

var (
	attributeMetadataColumns = map[string]*schema.Column{
		"resource_attributes": {Name: "resource_attributes", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes": {Name: "attributes", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
	}
)

type conditionBuilder struct{}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {
	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return attributeMetadataColumns["resource_attributes"], nil
	case telemetrytypes.FieldContextAttribute:
		return attributeMetadataColumns["attributes"], nil
	}
	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}:
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	return column.Name, nil
}

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		// if we don't have a column, we can't build a condition for related values
		return "", nil
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		// if we don't have a table field name, we can't build a condition for related values
		return "", nil
	}

	if key.FieldDataType != telemetrytypes.FieldDataTypeString {
		// if the field data type is not string, we can't build a condition for related values
		return "", nil
	}

	// the key must exist in the map for the main filter to apply
	containsExp := fmt.Sprintf("mapContains(%s, %s)", column.Name, sb.Var(key.Name))

	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.And(containsExp, sb.E(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.And(containsExp, sb.NE(tblFieldName, value)), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.And(containsExp, sb.Like(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.And(containsExp, sb.NotLike(tblFieldName, value)), nil
	case qbtypes.FilterOperatorILike:
		return sb.And(containsExp, sb.ILike(tblFieldName, value)), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.And(containsExp, sb.NotILike(tblFieldName, value)), nil

	case qbtypes.FilterOperatorContains:
		return sb.And(containsExp, sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value))), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.And(containsExp, sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value))), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(containsExp, exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(containsExp, exp), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.And(containsExp, sb.In(tblFieldName, values...)), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.And(containsExp, sb.NotIn(tblFieldName, values...)), nil

	// exists and not exists
	// in the query builder, `exists` and `not exists` are used for
	// key membership checks, so depending on the column type, the condition changes
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		switch column.Type {
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			} else {
				return sb.NE(leftOperand, true), nil
			}
		}
	}

	return "", nil
}
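
To make the shape of the emitted SQL concrete, here is a hedged usage sketch of the builder above (assumes the surrounding packages; `?` are ClickHouse placeholders bound through the select builder; illustrative only, not part of the diff):

    ctx := context.Background()
    sb := sqlbuilder.NewSelectBuilder()
    cb := NewConditionBuilder()
    cond, _ := cb.GetCondition(ctx, &telemetrytypes.TelemetryFieldKey{
        Name:          "service.name",
        FieldContext:  telemetrytypes.FieldContextResource,
        FieldDataType: telemetrytypes.FieldDataTypeString,
    }, qbtypes.FilterOperatorEqual, "checkout", sb)
    // cond is roughly:
    // (mapContains(resource_attributes, ?) AND resource_attributes['service.name'] = ?)

The leading mapContains guard is what distinguishes this builder from the per-signal ones: a value filter on a map-typed attribute only makes sense when the key is present in the row at all.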
272
pkg/telemetrymetadata/condition_builder_test.go
Normal file
@@ -0,0 +1,272 @@
package telemetrymetadata

import (
	"context"
	"testing"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetColumn(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		expectedCol   *schema.Column
		expectedError error
	}{
		{
			name: "Resource field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedCol:   attributeMetadataColumns["resource_attributes"],
			expectedError: nil,
		},
		{
			name: "Scope field - scope name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - scope.name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope.name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - scope_name",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "scope_name",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - version",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "version",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Scope field - other scope field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "custom.scope.field",
				FieldContext: telemetrytypes.FieldContextScope,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
		{
			name: "Attribute field - string type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - number type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - int64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeInt64,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Attribute field - float64 type",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "cpu.utilization",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			expectedCol:   attributeMetadataColumns["attributes"],
			expectedError: nil,
		},
		{
			name: "Log field - nonexistent",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedCol:   nil,
			expectedError: qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			col, err := conditionBuilder.GetColumn(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedCol, col)
			}
		})
	}
}

func TestGetFieldKeyName(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := &conditionBuilder{}

	testCases := []struct {
		name           string
		key            telemetrytypes.TelemetryFieldKey
		expectedResult string
		expectedError  error
	}{
		{
			name: "Map column type - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedResult: "attributes['user.id']",
			expectedError:  nil,
		},
		{
			name: "Map column type - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedResult: "attributes['request.size']",
			expectedError:  nil,
		},
		{
			name: "Map column type - bool attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedResult: "attributes['request.success']",
			expectedError:  nil,
		},
		{
			name: "Map column type - resource attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedResult: "resource_attributes['service.name']",
			expectedError:  nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextLog,
			},
			expectedResult: "",
			expectedError:  qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedResult, result)
			}
		})
	}
}

func TestGetCondition(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorILike,
			value:         "%admin%",
			expectedSQL:   "WHERE (mapContains(attributes, ?) AND LOWER(attributes['user.id']) LIKE LOWER(?))",
			expectedError: nil,
		},
		{
			name: "Not ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotILike,
			value:         "%admin%",
			expectedSQL:   "WHERE (mapContains(attributes, ?) AND LOWER(attributes['user.id']) NOT LIKE LOWER(?))",
			expectedError: nil,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
691
pkg/telemetrymetadata/metadata.go
Normal file
@@ -0,0 +1,691 @@
package telemetrymetadata

import (
	"context"
	"fmt"
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"go.uber.org/zap"
)

var (
	ErrFailedToGetTracesKeys    = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get traces keys")
	ErrFailedToGetLogsKeys      = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get logs keys")
	ErrFailedToGetTblStatement  = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get tbl statement")
	ErrFailedToGetMetricsKeys   = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get metrics keys")
	ErrFailedToGetRelatedValues = errors.Newf(errors.TypeInternal, errors.CodeInternal, "failed to get related values")
)

type telemetryMetaStore struct {
	telemetrystore         telemetrystore.TelemetryStore
	tracesDBName           string
	tracesFieldsTblName    string
	indexV3TblName         string
	metricsDBName          string
	metricsFieldsTblName   string
	timeseries1WTblName    string
	logsDBName             string
	logsFieldsTblName      string
	logsV2TblName          string
	relatedMetadataDBName  string
	relatedMetadataTblName string

	conditionBuilder qbtypes.ConditionBuilder
}

func NewTelemetryMetaStore(
	telemetrystore telemetrystore.TelemetryStore,
	tracesDBName string,
	tracesFieldsTblName string,
	indexV3TblName string,
	metricsDBName string,
	metricsFieldsTblName string,
	timeseries1WTblName string,
	logsDBName string,
	logsV2TblName string,
	logsFieldsTblName string,
	relatedMetadataDBName string,
	relatedMetadataTblName string,
) (telemetrytypes.MetadataStore, error) {
	return &telemetryMetaStore{
		telemetrystore:         telemetrystore,
		tracesDBName:           tracesDBName,
		tracesFieldsTblName:    tracesFieldsTblName,
		indexV3TblName:         indexV3TblName,
		metricsDBName:          metricsDBName,
		metricsFieldsTblName:   metricsFieldsTblName,
		timeseries1WTblName:    timeseries1WTblName,
		logsDBName:             logsDBName,
		logsV2TblName:          logsV2TblName,
		logsFieldsTblName:      logsFieldsTblName,
		relatedMetadataDBName:  relatedMetadataDBName,
		relatedMetadataTblName: relatedMetadataTblName,

		conditionBuilder: NewConditionBuilder(),
	}, nil
}

// tracesTblStatementToFieldKeys returns materialised attribute/resource/scope keys from the traces table
func (t *telemetryMetaStore) tracesTblStatementToFieldKeys(ctx context.Context) ([]*telemetrytypes.TelemetryFieldKey, error) {
	query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", t.tracesDBName, t.indexV3TblName)
	statements := []telemetrytypes.ShowCreateTableStatement{}
	err := t.telemetrystore.ClickhouseDB().Select(ctx, &statements, query)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTblStatement.Error())
	}

	return ExtractFieldKeysFromTblStatement(statements[0].Statement)
}

// getTracesKeys returns the keys from the spans that match the field selection criteria
func (t *telemetryMetaStore) getTracesKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	if len(fieldKeySelectors) == 0 {
		return nil, nil
	}

	// pre-fetch the materialised keys from the traces table
	matKeys, err := t.tracesTblStatementToFieldKeys(ctx)
	if err != nil {
		return nil, err
	}
	mapOfKeys := make(map[string]*telemetrytypes.TelemetryFieldKey)
	for _, key := range matKeys {
		mapOfKeys[key.Name+";"+key.FieldContext.StringValue()+";"+key.FieldDataType.StringValue()] = key
	}

	sb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", `
		CASE
			WHEN tag_type = 'spanfield' THEN 1
			WHEN tag_type = 'resource' THEN 2
			WHEN tag_type = 'scope' THEN 3
			WHEN tag_type = 'tag' THEN 4
			ELSE 5
		END as priority`).From(t.tracesDBName + "." + t.tracesFieldsTblName)
	var limit int

	conds := []string{}
	for _, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.StartUnixMilli != 0 {
			conds = append(conds, sb.GE("unix_milli", fieldKeySelector.StartUnixMilli))
		}
		if fieldKeySelector.EndUnixMilli != 0 {
			conds = append(conds, sb.LE("unix_milli", fieldKeySelector.EndUnixMilli))
		}

		// key part of the selector
		fieldKeyConds := []string{}
		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_key", fieldKeySelector.Name))
		} else {
			fieldKeyConds = append(fieldKeyConds, sb.Like("tag_key", "%"+fieldKeySelector.Name+"%"))
		}

		// now look at the field context
		if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_type", fieldKeySelector.FieldContext.TagType()))
		}

		// now look at the field data type
		if fieldKeySelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_data_type", fieldKeySelector.FieldDataType.TagDataType()))
		}

		conds = append(conds, sb.And(fieldKeyConds...))
		limit += fieldKeySelector.Limit
	}
	sb.Where(sb.Or(conds...))

	if limit == 0 {
		limit = 1000
	}

	mainSb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", "max(priority) as priority")
	mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
	mainSb.GroupBy("tag_key", "tag_type", "tag_data_type")
	mainSb.OrderBy("priority")
	mainSb.Limit(limit)

	query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
	}
	defer rows.Close()
	keys := []*telemetrytypes.TelemetryFieldKey{}
	for rows.Next() {
		var name string
		var fieldContext telemetrytypes.FieldContext
		var fieldDataType telemetrytypes.FieldDataType
		var priority uint8
		err = rows.Scan(&name, &fieldContext, &fieldDataType, &priority)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
		}
		key, ok := mapOfKeys[name+";"+fieldContext.StringValue()+";"+fieldDataType.StringValue()]

		// if there is no materialised column, create a key with the field context and data type
		if !ok {
			key = &telemetrytypes.TelemetryFieldKey{
				Name:          name,
				FieldContext:  fieldContext,
				FieldDataType: fieldDataType,
			}
		}

		keys = append(keys, key)
	}

	if rows.Err() != nil {
		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
	}

	return keys, nil
}

// logsTblStatementToFieldKeys returns materialised attribute/resource/scope keys from the logs table
func (t *telemetryMetaStore) logsTblStatementToFieldKeys(ctx context.Context) ([]*telemetrytypes.TelemetryFieldKey, error) {
	query := fmt.Sprintf("SHOW CREATE TABLE %s.%s", t.logsDBName, t.logsV2TblName)
	statements := []telemetrytypes.ShowCreateTableStatement{}
	err := t.telemetrystore.ClickhouseDB().Select(ctx, &statements, query)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTblStatement.Error())
	}

	return ExtractFieldKeysFromTblStatement(statements[0].Statement)
}

// getLogsKeys returns the keys from the logs that match the field selection criteria
func (t *telemetryMetaStore) getLogsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	if len(fieldKeySelectors) == 0 {
		return nil, nil
	}

	// pre-fetch the materialised keys from the logs table
	matKeys, err := t.logsTblStatementToFieldKeys(ctx)
	if err != nil {
		return nil, err
	}
	mapOfKeys := make(map[string]*telemetrytypes.TelemetryFieldKey)
	for _, key := range matKeys {
		mapOfKeys[key.Name+";"+key.FieldContext.StringValue()+";"+key.FieldDataType.StringValue()] = key
	}

	sb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", `
		CASE
			WHEN tag_type = 'logfield' THEN 1
			WHEN tag_type = 'resource' THEN 2
			WHEN tag_type = 'scope' THEN 3
			WHEN tag_type = 'tag' THEN 4
			ELSE 5
		END as priority`).From(t.logsDBName + "." + t.logsFieldsTblName)
	var limit int

	conds := []string{}
	for _, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.StartUnixMilli != 0 {
			conds = append(conds, sb.GE("unix_milli", fieldKeySelector.StartUnixMilli))
		}
		if fieldKeySelector.EndUnixMilli != 0 {
			conds = append(conds, sb.LE("unix_milli", fieldKeySelector.EndUnixMilli))
		}

		// key part of the selector
		fieldKeyConds := []string{}
		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_key", fieldKeySelector.Name))
		} else {
			fieldKeyConds = append(fieldKeyConds, sb.Like("tag_key", "%"+fieldKeySelector.Name+"%"))
		}

		// now look at the field context
		if fieldKeySelector.FieldContext != telemetrytypes.FieldContextUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_type", fieldKeySelector.FieldContext.TagType()))
		}

		// now look at the field data type
		if fieldKeySelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
			fieldKeyConds = append(fieldKeyConds, sb.E("tag_data_type", fieldKeySelector.FieldDataType.TagDataType()))
		}

		conds = append(conds, sb.And(fieldKeyConds...))
		limit += fieldKeySelector.Limit
	}
	sb.Where(sb.Or(conds...))
	if limit == 0 {
		limit = 1000
	}

	mainSb := sqlbuilder.Select("tag_key", "tag_type", "tag_data_type", "max(priority) as priority")
	mainSb.From(mainSb.BuilderAs(sb, "sub_query"))
	mainSb.GroupBy("tag_key", "tag_type", "tag_data_type")
	mainSb.OrderBy("priority")
	mainSb.Limit(limit)

	query, args := mainSb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}
	defer rows.Close()
	keys := []*telemetrytypes.TelemetryFieldKey{}
	for rows.Next() {
		var name string
		var fieldContext telemetrytypes.FieldContext
		var fieldDataType telemetrytypes.FieldDataType
		var priority uint8
		err = rows.Scan(&name, &fieldContext, &fieldDataType, &priority)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
		}
		key, ok := mapOfKeys[name+";"+fieldContext.StringValue()+";"+fieldDataType.StringValue()]

		// if there is no materialised column, create a key with the field context and data type
		if !ok {
			key = &telemetrytypes.TelemetryFieldKey{
				Name:          name,
				FieldContext:  fieldContext,
				FieldDataType: fieldDataType,
			}
		}

		keys = append(keys, key)
	}

	if rows.Err() != nil {
		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}

	return keys, nil
}

// getMetricsKeys returns the keys from the metrics that match the field selection criteria
// TODO(srikanthccv): update the implementation after the dot metrics migration is done
func (t *telemetryMetaStore) getMetricsKeys(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	if len(fieldKeySelectors) == 0 {
		return nil, nil
	}

	var whereClause, innerWhereClause string
	var limit int
	args := []any{}

	for _, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.MetricContext != nil {
			innerWhereClause += "metric_name IN ? AND "
			args = append(args, fieldKeySelector.MetricContext.MetricName)
		}
	}
	innerWhereClause += "__normalized = true"

	for idx, fieldKeySelector := range fieldKeySelectors {
		if fieldKeySelector.SelectorMatchType == telemetrytypes.FieldSelectorMatchTypeExact {
			whereClause += "(distinctTagKey = ? AND distinctTagKey NOT LIKE '\\_\\_%%')"
			args = append(args, fieldKeySelector.Name)
		} else {
			whereClause += "(distinctTagKey ILIKE ? AND distinctTagKey NOT LIKE '\\_\\_%%')"
			args = append(args, fmt.Sprintf("%%%s%%", fieldKeySelector.Name))
		}
		if idx != len(fieldKeySelectors)-1 {
			whereClause += " OR "
		}
		limit += fieldKeySelector.Limit
	}
	args = append(args, limit)

	query := fmt.Sprintf(`
		SELECT
			arrayJoin(tagKeys) AS distinctTagKey
		FROM (
			SELECT JSONExtractKeys(labels) AS tagKeys
			FROM %s.%s
			WHERE `+innerWhereClause+`
			GROUP BY tagKeys
		)
		WHERE `+whereClause+`
		GROUP BY distinctTagKey
		LIMIT ?
	`, t.metricsDBName, t.timeseries1WTblName)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
	}
	defer rows.Close()

	keys := []*telemetrytypes.TelemetryFieldKey{}
	for rows.Next() {
		var name string
		err = rows.Scan(&name)
		if err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
		}
		key := &telemetrytypes.TelemetryFieldKey{
			Name:          name,
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
		}
		keys = append(keys, key)
	}

	if rows.Err() != nil {
		return nil, errors.Wrapf(rows.Err(), errors.TypeInternal, errors.CodeInternal, ErrFailedToGetMetricsKeys.Error())
	}

	return keys, nil
}

func (t *telemetryMetaStore) GetKeys(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
	var keys []*telemetrytypes.TelemetryFieldKey
	var err error
	switch fieldKeySelector.Signal {
	case telemetrytypes.SignalTraces:
		keys, err = t.getTracesKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalLogs:
		keys, err = t.getLogsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalMetrics:
		keys, err = t.getMetricsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
	case telemetrytypes.SignalUnspecified:
		// get traces keys
		tracesKeys, err := t.getTracesKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, tracesKeys...)

		// get logs keys
		logsKeys, err := t.getLogsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, logsKeys...)

		// get metrics keys
		metricsKeys, err := t.getMetricsKeys(ctx, []*telemetrytypes.FieldKeySelector{fieldKeySelector})
		if err != nil {
			return nil, err
		}
		keys = append(keys, metricsKeys...)
	}
	if err != nil {
		return nil, err
	}

	mapOfKeys := make(map[string][]*telemetrytypes.TelemetryFieldKey)
	for _, key := range keys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}

	return mapOfKeys, nil
}

func (t *telemetryMetaStore) GetKeysMulti(ctx context.Context, fieldKeySelectors []*telemetrytypes.FieldKeySelector) (map[string][]*telemetrytypes.TelemetryFieldKey, error) {
	logsSelectors := []*telemetrytypes.FieldKeySelector{}
	tracesSelectors := []*telemetrytypes.FieldKeySelector{}
	metricsSelectors := []*telemetrytypes.FieldKeySelector{}

	for _, fieldKeySelector := range fieldKeySelectors {
		switch fieldKeySelector.Signal {
		case telemetrytypes.SignalLogs:
			logsSelectors = append(logsSelectors, fieldKeySelector)
		case telemetrytypes.SignalTraces:
			tracesSelectors = append(tracesSelectors, fieldKeySelector)
		case telemetrytypes.SignalMetrics:
			metricsSelectors = append(metricsSelectors, fieldKeySelector)
		case telemetrytypes.SignalUnspecified:
			logsSelectors = append(logsSelectors, fieldKeySelector)
			tracesSelectors = append(tracesSelectors, fieldKeySelector)
			metricsSelectors = append(metricsSelectors, fieldKeySelector)
		}
	}

	logsKeys, err := t.getLogsKeys(ctx, logsSelectors)
	if err != nil {
		return nil, err
	}
	tracesKeys, err := t.getTracesKeys(ctx, tracesSelectors)
	if err != nil {
		return nil, err
	}
	metricsKeys, err := t.getMetricsKeys(ctx, metricsSelectors)
	if err != nil {
		return nil, err
	}

	mapOfKeys := make(map[string][]*telemetrytypes.TelemetryFieldKey)
	for _, key := range logsKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}
	for _, key := range tracesKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}
	for _, key := range metricsKeys {
		mapOfKeys[key.Name] = append(mapOfKeys[key.Name], key)
	}

	return mapOfKeys, nil
}

func (t *telemetryMetaStore) GetKey(ctx context.Context, fieldKeySelector *telemetrytypes.FieldKeySelector) ([]*telemetrytypes.TelemetryFieldKey, error) {
	keys, err := t.GetKeys(ctx, fieldKeySelector)
	if err != nil {
		return nil, err
	}
	return keys[fieldKeySelector.Name], nil
}

func (t *telemetryMetaStore) getRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, error) {
	args := []any{}

	var andConditions []string

	andConditions = append(andConditions, `unix_milli >= ?`)
	args = append(args, fieldValueSelector.StartUnixMilli)

	andConditions = append(andConditions, `unix_milli <= ?`)
	args = append(args, fieldValueSelector.EndUnixMilli)

	if len(fieldValueSelector.ExistingQuery) != 0 {
		// TODO(srikanthccv): add the existing query to the where clause
	}
	whereClause := strings.Join(andConditions, " AND ")

	key := telemetrytypes.TelemetryFieldKey{
		Name:          fieldValueSelector.Name,
		Signal:        fieldValueSelector.Signal,
		FieldContext:  fieldValueSelector.FieldContext,
		FieldDataType: fieldValueSelector.FieldDataType,
	}

	// TODO(srikanthccv): add the select column
	selectColumn, _ := t.conditionBuilder.GetTableFieldName(ctx, &key)

	args = append(args, fieldValueSelector.Limit)
	filterSubQuery := fmt.Sprintf(
		"SELECT DISTINCT %s FROM %s.%s WHERE %s LIMIT ?",
		selectColumn,
		t.relatedMetadataDBName,
		t.relatedMetadataTblName,
		whereClause,
	)
	zap.L().Debug("filterSubQuery for related values", zap.String("query", filterSubQuery), zap.Any("args", args))

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, filterSubQuery, args...)
	if err != nil {
		return nil, ErrFailedToGetRelatedValues
	}
	defer rows.Close()

	var attributeValues []string
	for rows.Next() {
		var value string
		if err := rows.Scan(&value); err != nil {
			return nil, ErrFailedToGetRelatedValues
		}
		if value != "" {
			attributeValues = append(attributeValues, value)
		}
	}

	return attributeValues, nil
}

func (t *telemetryMetaStore) GetRelatedValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) ([]string, error) {
	return t.getRelatedValues(ctx, fieldValueSelector)
}

func (t *telemetryMetaStore) getSpanFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// build the query to get the values from the span attributes that match the field selection criteria
	limit := fieldValueSelector.Limit

	sb := sqlbuilder.Select("DISTINCT string_value, number_value").From(t.tracesDBName + "." + t.tracesFieldsTblName)

	if fieldValueSelector.Name != "" {
		sb.Where(sb.E("tag_key", fieldValueSelector.Name))
	}

	// now look at the field context
	if fieldValueSelector.FieldContext != telemetrytypes.FieldContextUnspecified {
		sb.Where(sb.E("tag_type", fieldValueSelector.FieldContext.TagType()))
	}

	// now look at the field data type
	if fieldValueSelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
		sb.Where(sb.E("tag_data_type", fieldValueSelector.FieldDataType.TagDataType()))
	}

	if fieldValueSelector.Value != "" {
		if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeString {
			sb.Where(sb.Like("string_value", "%"+fieldValueSelector.Value+"%"))
		} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeNumber {
			sb.Where(sb.IsNotNull("number_value"))
			sb.Where(sb.Like("toString(number_value)", "%"+fieldValueSelector.Value+"%"))
		}
	}

	// default to a small page of values when no limit is given
	if limit == 0 {
		limit = 50
	}
	sb.Limit(limit)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
	}
	defer rows.Close()

	values := &telemetrytypes.TelemetryFieldValues{}
	seen := make(map[string]bool)
	for rows.Next() {
		var stringValue string
		var numberValue float64
		if err := rows.Scan(&stringValue, &numberValue); err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetTracesKeys.Error())
		}
		if _, ok := seen[stringValue]; !ok {
			values.StringValues = append(values.StringValues, stringValue)
			seen[stringValue] = true
		}
		if _, ok := seen[fmt.Sprintf("%f", numberValue)]; !ok && numberValue != 0 {
			values.NumberValues = append(values.NumberValues, numberValue)
			seen[fmt.Sprintf("%f", numberValue)] = true
		}
	}

	return values, nil
}

func (t *telemetryMetaStore) getLogFieldValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// build the query to get the values from the log attributes that match the field selection criteria
	limit := fieldValueSelector.Limit

	sb := sqlbuilder.Select("DISTINCT string_value, number_value").From(t.logsDBName + "." + t.logsFieldsTblName)

	if fieldValueSelector.Name != "" {
		sb.Where(sb.E("tag_key", fieldValueSelector.Name))
	}

	if fieldValueSelector.FieldContext != telemetrytypes.FieldContextUnspecified {
		sb.Where(sb.E("tag_type", fieldValueSelector.FieldContext.TagType()))
	}

	if fieldValueSelector.FieldDataType != telemetrytypes.FieldDataTypeUnspecified {
		sb.Where(sb.E("tag_data_type", fieldValueSelector.FieldDataType.TagDataType()))
	}

	if fieldValueSelector.Value != "" {
		if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeString {
			sb.Where(sb.Like("string_value", "%"+fieldValueSelector.Value+"%"))
		} else if fieldValueSelector.FieldDataType == telemetrytypes.FieldDataTypeNumber {
			sb.Where(sb.IsNotNull("number_value"))
			sb.Where(sb.Like("toString(number_value)", "%"+fieldValueSelector.Value+"%"))
		}
	}

	// default to a small page of values when no limit is given
	if limit == 0 {
		limit = 50
	}
	sb.Limit(limit)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := t.telemetrystore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
	}
	defer rows.Close()

	values := &telemetrytypes.TelemetryFieldValues{}
	seen := make(map[string]bool)
	for rows.Next() {
		var stringValue string
		var numberValue float64
		if err := rows.Scan(&stringValue, &numberValue); err != nil {
			return nil, errors.Wrapf(err, errors.TypeInternal, errors.CodeInternal, ErrFailedToGetLogsKeys.Error())
		}
		if _, ok := seen[stringValue]; !ok {
			values.StringValues = append(values.StringValues, stringValue)
			seen[stringValue] = true
		}
		if _, ok := seen[fmt.Sprintf("%f", numberValue)]; !ok && numberValue != 0 {
			values.NumberValues = append(values.NumberValues, numberValue)
			seen[fmt.Sprintf("%f", numberValue)] = true
		}
	}
	return values, nil
}

func (t *telemetryMetaStore) getMetricFieldValues(_ context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	// TODO(srikanthccv): implement this. use new tables?
	return nil, nil
}

func (t *telemetryMetaStore) GetAllValues(ctx context.Context, fieldValueSelector *telemetrytypes.FieldValueSelector) (*telemetrytypes.TelemetryFieldValues, error) {
	var values *telemetrytypes.TelemetryFieldValues
	var err error
	switch fieldValueSelector.Signal {
	case telemetrytypes.SignalTraces:
		values, err = t.getSpanFieldValues(ctx, fieldValueSelector)
	case telemetrytypes.SignalLogs:
		values, err = t.getLogFieldValues(ctx, fieldValueSelector)
	case telemetrytypes.SignalMetrics:
		values, err = t.getMetricFieldValues(ctx, fieldValueSelector)
	}
	if err != nil {
		return nil, err
	}
	return values, nil
}
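
A hedged sketch of how a caller might use the metastore above to look up log attribute keys (assumes a store constructed via NewTelemetryMetaStore; names and values are illustrative, not part of the diff):

    selector := &telemetrytypes.FieldKeySelector{
        Signal:            telemetrytypes.SignalLogs,
        SelectorMatchType: telemetrytypes.FieldSelectorMatchTypeExact,
        Name:              "http.method",
        Limit:             10,
    }
    keys, err := metaStore.GetKeys(ctx, selector)
    // keys maps each field name to one entry per (context, data type) variant found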
86
pkg/telemetrymetadata/metadata_test.go
Normal file
@@ -0,0 +1,86 @@
package telemetrymetadata

import (
	"context"
	"fmt"
	"regexp"
	"testing"

	"github.com/SigNoz/signoz/pkg/telemetrylogs"
	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
	"github.com/SigNoz/signoz/pkg/telemetrytraces"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	cmock "github.com/srikanthccv/ClickHouse-go-mock"
)

type regexMatcher struct {
}

func (m *regexMatcher) Match(expectedSQL, actualSQL string) error {
	re, err := regexp.Compile(expectedSQL)
	if err != nil {
		return err
	}
	if !re.MatchString(actualSQL) {
		return fmt.Errorf("expected query to contain %s, got %s", expectedSQL, actualSQL)
	}
	return nil
}

func TestGetKeys(t *testing.T) {
	mockTelemetryStore := telemetrystoretest.New(telemetrystore.Config{}, &regexMatcher{})
	mock := mockTelemetryStore.Mock()

	metadata, err := NewTelemetryMetaStore(
		mockTelemetryStore,
		telemetrytraces.DBName,
		telemetrytraces.TagAttributesV2TableName,
		telemetrytraces.SpanIndexV3TableName,
		telemetrymetrics.DBName,
		telemetrymetrics.TimeseriesV41weekTableName,
		telemetrymetrics.TimeseriesV41weekTableName,
		telemetrylogs.DBName,
		telemetrylogs.LogsV2TableName,
		telemetrylogs.TagAttributesV2TableName,
		DBName,
		AttributesMetadataLocalTableName,
	)
	if err != nil {
		t.Fatalf("Failed to create telemetry metadata store: %v", err)
	}

	rows := cmock.NewRows([]cmock.ColumnType{
		{Name: "statement", Type: "String"},
	}, [][]any{{"CREATE TABLE signoz_traces.signoz_index_v3"}})

	mock.
		ExpectSelect("SHOW CREATE TABLE signoz_traces.distributed_signoz_index_v3").
		WillReturnRows(rows)

	query := `SELECT.*`

	mock.ExpectQuery(query).
		WithArgs("%http.method%", telemetrytypes.FieldContextSpan.TagType(), telemetrytypes.FieldDataTypeString.TagDataType(), 10).
		WillReturnRows(cmock.NewRows([]cmock.ColumnType{
			{Name: "tag_key", Type: "String"},
			{Name: "tag_type", Type: "String"},
			{Name: "tag_data_type", Type: "String"},
			{Name: "priority", Type: "UInt8"},
		}, [][]any{{"http.method", "tag", "String", 1}, {"http.method", "tag", "String", 1}}))
	keys, err := metadata.GetKeys(context.Background(), &telemetrytypes.FieldKeySelector{
		Signal:        telemetrytypes.SignalTraces,
		FieldContext:  telemetrytypes.FieldContextSpan,
		FieldDataType: telemetrytypes.FieldDataTypeString,
		Name:          "http.method",
		Limit:         10,
	})
	if err != nil {
		t.Fatalf("Failed to get keys: %v", err)
	}

	t.Logf("Keys: %v", keys)
}
132
pkg/telemetrymetadata/stmt_parse.go
Normal file
@@ -0,0 +1,132 @@
package telemetrymetadata

import (
	"strings"

	"github.com/AfterShip/clickhouse-sql-parser/parser"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// TelemetryFieldVisitor is an AST visitor for extracting telemetry fields
type TelemetryFieldVisitor struct {
	parser.DefaultASTVisitor
	Fields []*telemetrytypes.TelemetryFieldKey
}

func NewTelemetryFieldVisitor() *TelemetryFieldVisitor {
	return &TelemetryFieldVisitor{
		Fields: make([]*telemetrytypes.TelemetryFieldKey, 0),
	}
}

// VisitColumnDef is called when visiting a column definition
func (v *TelemetryFieldVisitor) VisitColumnDef(expr *parser.ColumnDef) error {
	// Check if this is a materialized column with a DEFAULT expression
	if expr.DefaultExpr == nil {
		return nil
	}

	// Parse the column name to extract context and data type
	columnName := expr.Name.String()

	// Remove backticks if present
	columnName = strings.TrimPrefix(columnName, "`")
	columnName = strings.TrimSuffix(columnName, "`")

	// Split the column name into its components
	parts := strings.Split(columnName, "_")
	if len(parts) < 2 {
		return nil
	}

	context := parts[0]
	dataType := parts[1]

	// Check if this is a valid telemetry column
	var fieldContext telemetrytypes.FieldContext
	switch context {
	case "resource":
		fieldContext = telemetrytypes.FieldContextResource
	case "scope":
		fieldContext = telemetrytypes.FieldContextScope
	case "attribute":
		fieldContext = telemetrytypes.FieldContextAttribute
	default:
		return nil // not a telemetry column
	}

	// Check and convert the data type; all numeric variants map to float64
	var fieldDataType telemetrytypes.FieldDataType
	switch dataType {
	case "string":
		fieldDataType = telemetrytypes.FieldDataTypeString
	case "bool":
		fieldDataType = telemetrytypes.FieldDataTypeBool
	case "int", "int64", "float", "float64", "number":
		fieldDataType = telemetrytypes.FieldDataTypeFloat64
	default:
		return nil // unknown data type
	}

	// Extract the field name from the DEFAULT expression.
	// The DEFAULT expression should be something like: resources_string['k8s.cluster.name'],
	// and the key inside the square brackets is the field name.
	defaultExprStr := expr.DefaultExpr.String()

	// Look for the pattern: map['key']
	startIdx := strings.Index(defaultExprStr, "['")
	endIdx := strings.Index(defaultExprStr, "']")

	if startIdx == -1 || endIdx == -1 || startIdx+2 >= endIdx {
		return nil // invalid DEFAULT expression format
	}

	fieldName := defaultExprStr[startIdx+2 : endIdx]

	// Create and store the TelemetryFieldKey
	field := telemetrytypes.TelemetryFieldKey{
		Name:          fieldName,
		FieldContext:  fieldContext,
		FieldDataType: fieldDataType,
		Materialized:  true,
	}
	v.Fields = append(v.Fields, &field)
	return nil
}

func ExtractFieldKeysFromTblStatement(statement string) ([]*telemetrytypes.TelemetryFieldKey, error) {
	// Parse the CREATE TABLE statement using the ClickHouse parser
	p := parser.NewParser(statement)
	stmts, err := p.ParseStmts()
	if err != nil {
		return nil, err
	}

	// Create a visitor to collect telemetry fields
	visitor := NewTelemetryFieldVisitor()

	// Visit each statement; we are only interested in CreateTable statements
	for _, stmt := range stmts {
		createTable, ok := stmt.(*parser.CreateTable)
		if !ok {
			continue
		}

		// Visit the table schema to extract column definitions
		if createTable.TableSchema != nil {
			for _, column := range createTable.TableSchema.Columns {
				if err := column.Accept(visitor); err != nil {
					return nil, err
				}
			}
		}
	}

	return visitor.Fields, nil
}
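
For reference, a hedged sketch of the parser above on a single materialised column (the DDL is trimmed down for illustration; whether the parser accepts this exact minimal statement is an assumption):

    // illustrative only, not part of the diff
    stmt := "CREATE TABLE signoz_logs.logs_v2 (" +
        "`resource_string_k8s$$pod$$name` String DEFAULT resources_string['k8s.pod.name'] CODEC(ZSTD(1))" +
        ") ENGINE = MergeTree ORDER BY tuple()"
    keys, err := ExtractFieldKeysFromTblStatement(stmt)
    // keys[0] would be roughly:
    // {Name: "k8s.pod.name", FieldContext: Resource, FieldDataType: String, Materialized: true}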
148
pkg/telemetrymetadata/stmt_parse_test.go
Normal file
@@ -0,0 +1,148 @@
package telemetrymetadata

import (
	"slices"
	"testing"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

func TestExtractFieldKeysFromTblStatement(t *testing.T) {

	var statement = `CREATE TABLE signoz_logs.logs_v2
(
	` + "`ts_bucket_start`" + ` UInt64 CODEC(DoubleDelta, LZ4),
	` + "`resource_fingerprint`" + ` String CODEC(ZSTD(1)),
	` + "`timestamp`" + ` UInt64 CODEC(DoubleDelta, LZ4),
	` + "`observed_timestamp`" + ` UInt64 CODEC(DoubleDelta, LZ4),
	` + "`id`" + ` String CODEC(ZSTD(1)),
	` + "`trace_id`" + ` String CODEC(ZSTD(1)),
	` + "`span_id`" + ` String CODEC(ZSTD(1)),
	` + "`trace_flags`" + ` UInt32,
	` + "`severity_text`" + ` LowCardinality(String) CODEC(ZSTD(1)),
	` + "`severity_number`" + ` UInt8,
	` + "`body`" + ` String CODEC(ZSTD(2)),
	` + "`attributes_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
	` + "`attributes_number`" + ` Map(LowCardinality(String), Float64) CODEC(ZSTD(1)),
	` + "`attributes_bool`" + ` Map(LowCardinality(String), Bool) CODEC(ZSTD(1)),
	` + "`resources_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
	` + "`scope_name`" + ` String CODEC(ZSTD(1)),
	` + "`scope_version`" + ` String CODEC(ZSTD(1)),
	` + "`scope_string`" + ` Map(LowCardinality(String), String) CODEC(ZSTD(1)),
	` + "`attribute_number_input_size`" + ` Int64 DEFAULT attributes_number['input_size'] CODEC(ZSTD(1)),
	` + "`attribute_number_input_size_exists`" + ` Bool DEFAULT if(mapContains(attributes_number, 'input_size') != 0, true, false) CODEC(ZSTD(1)),
	` + "`attribute_string_log$$iostream`" + ` String DEFAULT attributes_string['log.iostream'] CODEC(ZSTD(1)),
	` + "`attribute_string_log$$iostream_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'log.iostream') != 0, true, false) CODEC(ZSTD(1)),
	` + "`attribute_string_log$$file$$path`" + ` String DEFAULT attributes_string['log.file.path'] CODEC(ZSTD(1)),
	` + "`attribute_string_log$$file$$path_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'log.file.path') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$cluster$$name`" + ` String DEFAULT resources_string['k8s.cluster.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$cluster$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.cluster.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$namespace$$name`" + ` String DEFAULT resources_string['k8s.namespace.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$namespace$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.namespace.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$pod$$name`" + ` String DEFAULT resources_string['k8s.pod.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$pod$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.pod.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$node$$name`" + ` String DEFAULT resources_string['k8s.node.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$node$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.node.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$container$$name`" + ` String DEFAULT resources_string['k8s.container.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$container$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.container.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$deployment$$name`" + ` String DEFAULT resources_string['k8s.deployment.name'] CODEC(ZSTD(1)),
	` + "`resource_string_k8s$$deployment$$name_exists`" + ` Bool DEFAULT if(mapContains(resources_string, 'k8s.deployment.name') != 0, true, false) CODEC(ZSTD(1)),
	` + "`attribute_string_processor`" + ` String DEFAULT attributes_string['processor'] CODEC(ZSTD(1)),
	` + "`attribute_string_processor_exists`" + ` Bool DEFAULT if(mapContains(attributes_string, 'processor') != 0, true, false) CODEC(ZSTD(1)),
	INDEX body_idx lower(body) TYPE ngrambf_v1(4, 60000, 5, 0) GRANULARITY 1,
	INDEX id_minmax id TYPE minmax GRANULARITY 1,
	INDEX severity_number_idx severity_number TYPE set(25) GRANULARITY 4,
	INDEX severity_text_idx severity_text TYPE set(25) GRANULARITY 4,
	INDEX trace_flags_idx trace_flags TYPE bloom_filter GRANULARITY 4,
	INDEX scope_name_idx scope_name TYPE tokenbf_v1(10240, 3, 0) GRANULARITY 4,
	INDEX ` + "`resource_string_k8s$$cluster$$name_idx`" + ` ` + "`resource_string_k8s$$cluster$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$namespace$$name_idx`" + ` ` + "`resource_string_k8s$$namespace$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$pod$$name_idx`" + ` ` + "`resource_string_k8s$$pod$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$node$$name_idx`" + ` ` + "`resource_string_k8s$$node$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$container$$name_idx`" + ` ` + "`resource_string_k8s$$container$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX ` + "`resource_string_k8s$$deployment$$name_idx`" + ` ` + "`resource_string_k8s$$deployment$$name`" + ` TYPE bloom_filter(0.01) GRANULARITY 64,
	INDEX attribute_string_processor_idx attribute_string_processor TYPE bloom_filter(0.01) GRANULARITY 64
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')
PARTITION BY toDate(timestamp / 1000000000)
ORDER BY (ts_bucket_start, resource_fingerprint, severity_text, timestamp, id)
TTL toDateTime(timestamp / 1000000000) + toIntervalSecond(2592000)
SETTINGS ttl_only_drop_parts = 1, index_granularity = 8192`

	keys, err := ExtractFieldKeysFromTblStatement(statement)
	if err != nil {
		t.Fatalf("failed to extract field keys from tbl statement: %v", err)
	}

	// some expected keys
	expectedKeys := []*telemetrytypes.TelemetryFieldKey{
		{
			Name:          "k8s.pod.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.cluster.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.namespace.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.deployment.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.node.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "k8s.container.name",
			FieldContext:  telemetrytypes.FieldContextResource,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "processor",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "input_size",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			Materialized:  true,
		},
		{
			Name:          "log.iostream",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
			Materialized:  true,
		},
		{
			Name:          "log.file.path",
			FieldContext:  telemetrytypes.FieldContextAttribute,
			FieldDataType: telemetrytypes.FieldDataTypeString,
|
||||
Materialized: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, key := range expectedKeys {
|
||||
if !slices.ContainsFunc(keys, func(k *telemetrytypes.TelemetryFieldKey) bool {
|
||||
return k.Name == key.Name && k.FieldContext == key.FieldContext && k.FieldDataType == key.FieldDataType && k.Materialized == key.Materialized
|
||||
}) {
|
||||
t.Errorf("expected key %v not found", key)
|
||||
}
|
||||
}
|
||||
}
|
||||
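The test above drives the extractor against a full logs DDL: every materialized DEFAULT column yields a field key with its original dotted name, context, and data type. As a rough usage sketch outside a test, assuming ExtractFieldKeysFromTblStatement keeps the signature used above (the print format here is illustrative only):

	// Hypothetical driver code, not part of the diff.
	keys, err := ExtractFieldKeysFromTblStatement(statement)
	if err != nil {
		log.Fatalf("parse DDL: %v", err)
	}
	for _, k := range keys {
		// e.g. "k8s.pod.name context=resource type=string materialized=true"
		fmt.Printf("%s context=%s type=%s materialized=%t\n",
			k.Name, k.FieldContext, k.FieldDataType, k.Materialized)
	}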
7
pkg/telemetrymetadata/tables.go
Normal file
@@ -0,0 +1,7 @@
package telemetrymetadata

const (
	DBName                           = "signoz_metadata"
	AttributesMetadataTableName      = "distributed_attributes_metadata"
	AttributesMetadataLocalTableName = "attributes_metadata"
)
21
pkg/telemetrymetrics/tables.go
Normal file
@@ -0,0 +1,21 @@
package telemetrymetrics

const (
	DBName                          = "signoz_metrics"
	SamplesV4TableName              = "distributed_samples_v4"
	SamplesV4LocalTableName         = "samples_v4"
	SamplesV4Agg5mTableName         = "distributed_samples_v4_agg_5m"
	SamplesV4Agg5mLocalTableName    = "samples_v4_agg_5m"
	SamplesV4Agg30mTableName        = "distributed_samples_v4_agg_30m"
	SamplesV4Agg30mLocalTableName   = "samples_v4_agg_30m"
	ExpHistogramTableName           = "distributed_exp_hist"
	ExpHistogramLocalTableName      = "exp_hist"
	TimeseriesV4TableName           = "distributed_time_series_v4"
	TimeseriesV4LocalTableName      = "time_series_v4"
	TimeseriesV46hrsTableName       = "distributed_time_series_v4_6hrs"
	TimeseriesV46hrsLocalTableName  = "time_series_v4_6hrs"
	TimeseriesV41dayTableName       = "distributed_time_series_v4_1day"
	TimeseriesV41dayLocalTableName  = "time_series_v4_1day"
	TimeseriesV41weekTableName      = "distributed_time_series_v4_1week"
	TimeseriesV41weekLocalTableName = "time_series_v4_1week"
)
@@ -2,6 +2,8 @@ package telemetrystorehook

import (
	"context"
	"database/sql"
	"errors"
	"log/slog"
	"time"

@@ -30,12 +32,21 @@ func (logging) BeforeQuery(ctx context.Context, event *telemetrystore.QueryEvent
}

func (hook *logging) AfterQuery(ctx context.Context, event *telemetrystore.QueryEvent) {
-	hook.logger.Log(
-		ctx,
-		hook.level,
-		"::TELEMETRYSTORE-QUERY::",
	level := hook.level
	args := []any{
		"db.query.text", event.Query,
		"db.query.args", event.QueryArgs,
		"db.duration", time.Since(event.StartTime).String(),
	}
	if event.Err != nil && !errors.Is(event.Err, sql.ErrNoRows) && !errors.Is(event.Err, context.Canceled) {
		level = slog.LevelError
		args = append(args, "db.query.error", event.Err)
	}

	hook.logger.Log(
		ctx,
		level,
		"::TELEMETRYSTORE-QUERY::",
		args...,
	)
}
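The reworked hook defers choosing the log level until the query outcome is known, escalating to error only for real failures. A standalone sketch of the same pattern, with illustrative names rather than the hook's actual wiring:

	// Hypothetical demonstration of level escalation with log/slog.
	func logQuery(ctx context.Context, logger *slog.Logger, query string, err error) {
		level := slog.LevelInfo
		args := []any{"db.query.text", query}
		// sql.ErrNoRows and context.Canceled are expected outcomes, not failures.
		if err != nil && !errors.Is(err, sql.ErrNoRows) && !errors.Is(err, context.Canceled) {
			level = slog.LevelError
			args = append(args, "db.query.error", err)
		}
		logger.Log(ctx, level, "::TELEMETRYSTORE-QUERY::", args...)
	}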
352
pkg/telemetrytraces/condition_builder.go
Normal file
@@ -0,0 +1,352 @@
package telemetrytraces

import (
	"context"
	"fmt"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

var (
	indexV3Columns = map[string]*schema.Column{
		"ts_bucket_start":      {Name: "ts_bucket_start", Type: schema.ColumnTypeUInt64},
		"resource_fingerprint": {Name: "resource_fingerprint", Type: schema.ColumnTypeString},

		// intrinsic columns
		"timestamp":          {Name: "timestamp", Type: schema.DateTime64ColumnType{Precision: 9, Timezone: "UTC"}},
		"trace_id":           {Name: "trace_id", Type: schema.FixedStringColumnType{Length: 32}},
		"span_id":            {Name: "span_id", Type: schema.ColumnTypeString},
		"trace_state":        {Name: "trace_state", Type: schema.ColumnTypeString},
		"parent_span_id":     {Name: "parent_span_id", Type: schema.ColumnTypeString},
		"flags":              {Name: "flags", Type: schema.ColumnTypeUInt32},
		"name":               {Name: "name", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"kind":               {Name: "kind", Type: schema.ColumnTypeInt8},
		"kind_string":        {Name: "kind_string", Type: schema.ColumnTypeString},
		"duration_nano":      {Name: "duration_nano", Type: schema.ColumnTypeUInt64},
		"status_code":        {Name: "status_code", Type: schema.ColumnTypeInt16},
		"status_message":     {Name: "status_message", Type: schema.ColumnTypeString},
		"status_code_string": {Name: "status_code_string", Type: schema.ColumnTypeString},

		// attributes columns
		"attributes_string": {Name: "attributes_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},
		"attributes_number": {Name: "attributes_number", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}},
		"attributes_bool": {Name: "attributes_bool", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}},
		"resources_string": {Name: "resources_string", Type: schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}},

		"events": {Name: "events", Type: schema.ArrayColumnType{
			ElementType: schema.ColumnTypeString,
		}},
		"links": {Name: "links", Type: schema.ColumnTypeString},
		// derived columns
		"response_status_code": {Name: "response_status_code", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"external_http_url":    {Name: "external_http_url", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"http_url":             {Name: "http_url", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"external_http_method": {Name: "external_http_method", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"http_method":          {Name: "http_method", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"http_host":            {Name: "http_host", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"db_name":              {Name: "db_name", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"db_operation":         {Name: "db_operation", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"has_error":            {Name: "has_error", Type: schema.ColumnTypeBool},
		"is_remote":            {Name: "is_remote", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		// materialized columns
		"resource_string_service$$name":          {Name: "resource_string_service$$name", Type: schema.ColumnTypeString},
		"attribute_string_http$$route":           {Name: "attribute_string_http$$route", Type: schema.ColumnTypeString},
		"attribute_string_messaging$$system":     {Name: "attribute_string_messaging$$system", Type: schema.ColumnTypeString},
		"attribute_string_messaging$$operation":  {Name: "attribute_string_messaging$$operation", Type: schema.ColumnTypeString},
		"attribute_string_db$$system":            {Name: "attribute_string_db$$system", Type: schema.ColumnTypeString},
		"attribute_string_rpc$$system":           {Name: "attribute_string_rpc$$system", Type: schema.ColumnTypeString},
		"attribute_string_rpc$$service":          {Name: "attribute_string_rpc$$service", Type: schema.ColumnTypeString},
		"attribute_string_rpc$$method":           {Name: "attribute_string_rpc$$method", Type: schema.ColumnTypeString},
		"attribute_string_peer$$service":         {Name: "attribute_string_peer$$service", Type: schema.ColumnTypeString},

		// deprecated intrinsic columns
		"traceID":          {Name: "traceID", Type: schema.FixedStringColumnType{Length: 32}},
		"spanID":           {Name: "spanID", Type: schema.ColumnTypeString},
		"parentSpanID":     {Name: "parentSpanID", Type: schema.ColumnTypeString},
		"spanKind":         {Name: "spanKind", Type: schema.ColumnTypeString},
		"durationNano":     {Name: "durationNano", Type: schema.ColumnTypeUInt64},
		"statusCode":       {Name: "statusCode", Type: schema.ColumnTypeInt16},
		"statusMessage":    {Name: "statusMessage", Type: schema.ColumnTypeString},
		"statusCodeString": {Name: "statusCodeString", Type: schema.ColumnTypeString},

		// deprecated derived columns
		"references":         {Name: "references", Type: schema.ColumnTypeString},
		"responseStatusCode": {Name: "responseStatusCode", Type: schema.ColumnTypeString},
		"externalHttpUrl":    {Name: "externalHttpUrl", Type: schema.ColumnTypeString},
		"httpUrl":            {Name: "httpUrl", Type: schema.ColumnTypeString},
		"externalHttpMethod": {Name: "externalHttpMethod", Type: schema.ColumnTypeString},
		"httpMethod":         {Name: "httpMethod", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"httpHost":           {Name: "httpHost", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"dbName":             {Name: "dbName", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"dbOperation":        {Name: "dbOperation", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"hasError":           {Name: "hasError", Type: schema.ColumnTypeBool},
		"isRemote":           {Name: "isRemote", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"serviceName":        {Name: "serviceName", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"httpRoute":          {Name: "httpRoute", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"msgSystem":          {Name: "msgSystem", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"msgOperation":       {Name: "msgOperation", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"dbSystem":           {Name: "dbSystem", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"rpcSystem":          {Name: "rpcSystem", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"rpcService":         {Name: "rpcService", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"rpcMethod":          {Name: "rpcMethod", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},
		"peerService":        {Name: "peerService", Type: schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString}},

		// materialized exists columns
		"resource_string_service$$name_exists":          {Name: "resource_string_service$$name_exists", Type: schema.ColumnTypeBool},
		"attribute_string_http$$route_exists":           {Name: "attribute_string_http$$route_exists", Type: schema.ColumnTypeBool},
		"attribute_string_messaging$$system_exists":     {Name: "attribute_string_messaging$$system_exists", Type: schema.ColumnTypeBool},
		"attribute_string_messaging$$operation_exists":  {Name: "attribute_string_messaging$$operation_exists", Type: schema.ColumnTypeBool},
		"attribute_string_db$$system_exists":            {Name: "attribute_string_db$$system_exists", Type: schema.ColumnTypeBool},
		"attribute_string_rpc$$system_exists":           {Name: "attribute_string_rpc$$system_exists", Type: schema.ColumnTypeBool},
		"attribute_string_rpc$$service_exists":          {Name: "attribute_string_rpc$$service_exists", Type: schema.ColumnTypeBool},
		"attribute_string_rpc$$method_exists":           {Name: "attribute_string_rpc$$method_exists", Type: schema.ColumnTypeBool},
		"attribute_string_peer$$service_exists":         {Name: "attribute_string_peer$$service_exists", Type: schema.ColumnTypeBool},
	}
)

// interface check
var _ qbtypes.ConditionBuilder = &conditionBuilder{}

type conditionBuilder struct {
}

func NewConditionBuilder() qbtypes.ConditionBuilder {
	return &conditionBuilder{}
}

func (c *conditionBuilder) GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error) {

	switch key.FieldContext {
	case telemetrytypes.FieldContextResource:
		return indexV3Columns["resources_string"], nil
	case telemetrytypes.FieldContextScope:
		// we don't have scope data stored in the spans yet
		return nil, qbtypes.ErrColumnNotFound
	case telemetrytypes.FieldContextAttribute:
		switch key.FieldDataType {
		case telemetrytypes.FieldDataTypeString:
			return indexV3Columns["attributes_string"], nil
		case telemetrytypes.FieldDataTypeInt64, telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeNumber:
			return indexV3Columns["attributes_number"], nil
		case telemetrytypes.FieldDataTypeBool:
			return indexV3Columns["attributes_bool"], nil
		}
	case telemetrytypes.FieldContextSpan:
		col, ok := indexV3Columns[key.Name]
		if !ok {
			return nil, qbtypes.ErrColumnNotFound
		}
		return col, nil
	}

	return nil, qbtypes.ErrColumnNotFound
}

func (c *conditionBuilder) GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	switch column.Type {
	case schema.ColumnTypeString,
		schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		schema.ColumnTypeUInt64,
		schema.ColumnTypeUInt32,
		schema.ColumnTypeInt8,
		schema.ColumnTypeInt16,
		schema.ColumnTypeBool,
		schema.DateTime64ColumnType{Precision: 9, Timezone: "UTC"},
		schema.FixedStringColumnType{Length: 32}:
		return column.Name, nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeString,
	}:
		// a key could have been materialized, if so return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeFloat64,
	}:
		// a key could have been materialized, if so return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	case schema.MapColumnType{
		KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
		ValueType: schema.ColumnTypeBool,
	}:
		// a key could have been materialized, if so return the materialized column name
		if key.Materialized {
			return telemetrytypes.FieldKeyToMaterializedColumnName(key), nil
		}
		return fmt.Sprintf("%s['%s']", column.Name, key.Name), nil
	}
	// should not reach here
	return column.Name, nil
}

func (c *conditionBuilder) GetCondition(
	ctx context.Context,
	key *telemetrytypes.TelemetryFieldKey,
	operator qbtypes.FilterOperator,
	value any,
	sb *sqlbuilder.SelectBuilder,
) (string, error) {
	column, err := c.GetColumn(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, err := c.GetTableFieldName(ctx, key)
	if err != nil {
		return "", err
	}

	tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName)

	switch operator {
	// regular operators
	case qbtypes.FilterOperatorEqual:
		return sb.E(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotEqual:
		return sb.NE(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThan:
		return sb.G(tblFieldName, value), nil
	case qbtypes.FilterOperatorGreaterThanOrEq:
		return sb.GE(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThan:
		return sb.LT(tblFieldName, value), nil
	case qbtypes.FilterOperatorLessThanOrEq:
		return sb.LE(tblFieldName, value), nil

	// like and not like
	case qbtypes.FilterOperatorLike:
		return sb.Like(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotLike:
		return sb.NotLike(tblFieldName, value), nil
	case qbtypes.FilterOperatorILike:
		return sb.ILike(tblFieldName, value), nil
	case qbtypes.FilterOperatorNotILike:
		return sb.NotILike(tblFieldName, value), nil

	case qbtypes.FilterOperatorContains:
		return sb.ILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil
	case qbtypes.FilterOperatorNotContains:
		return sb.NotILike(tblFieldName, fmt.Sprintf("%%%s%%", value)), nil

	case qbtypes.FilterOperatorRegexp:
		exp := fmt.Sprintf(`match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil
	case qbtypes.FilterOperatorNotRegexp:
		exp := fmt.Sprintf(`not match(%s, %s)`, tblFieldName, sb.Var(value))
		return sb.And(exp), nil

	// between and not between
	case qbtypes.FilterOperatorBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.Between(tblFieldName, values[0], values[1]), nil
	case qbtypes.FilterOperatorNotBetween:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrBetweenValues
		}
		if len(values) != 2 {
			return "", qbtypes.ErrBetweenValues
		}
		return sb.NotBetween(tblFieldName, values[0], values[1]), nil

	// in and not in
	case qbtypes.FilterOperatorIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.In(tblFieldName, values...), nil
	case qbtypes.FilterOperatorNotIn:
		values, ok := value.([]any)
		if !ok {
			return "", qbtypes.ErrInValues
		}
		return sb.NotIn(tblFieldName, values...), nil

	// exists and not exists
	// in the query builder, `exists` and `not exists` are used for
	// key membership checks, so depending on the column type, the condition changes
	case qbtypes.FilterOperatorExists, qbtypes.FilterOperatorNotExists:
		var value any
		switch column.Type {
		case schema.ColumnTypeString,
			schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			schema.FixedStringColumnType{Length: 32},
			schema.DateTime64ColumnType{Precision: 9, Timezone: "UTC"}:
			value = ""
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.ColumnTypeUInt64,
			schema.ColumnTypeUInt32,
			schema.ColumnTypeUInt8,
			schema.ColumnTypeInt8,
			schema.ColumnTypeInt16,
			schema.ColumnTypeBool:
			value = 0
			if operator == qbtypes.FilterOperatorExists {
				return sb.NE(tblFieldName, value), nil
			} else {
				return sb.E(tblFieldName, value), nil
			}
		case schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeString,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeBool,
		}, schema.MapColumnType{
			KeyType:   schema.LowCardinalityColumnType{ElementType: schema.ColumnTypeString},
			ValueType: schema.ColumnTypeFloat64,
		}:
			leftOperand := fmt.Sprintf("mapContains(%s, '%s')", column.Name, key.Name)
			if key.Materialized {
				leftOperand = telemetrytypes.FieldKeyToMaterializedColumnNameForExists(key)
			}
			if operator == qbtypes.FilterOperatorExists {
				return sb.E(leftOperand, true), nil
			} else {
				return sb.NE(leftOperand, true), nil
			}
		default:
			return "", fmt.Errorf("exists operator is not supported for column type %s", column.Type)
		}
	}
	return "", nil
}
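GetTableFieldName is where dotted key names get translated into ClickHouse expressions: plain columns pass through, map-backed attributes become map lookups, and materialized keys resolve to their dedicated columns. A hedged illustration of the expected outputs, assuming the helpers behave as defined in this diff:

	// Hypothetical walkthrough, not part of the package.
	cb := NewConditionBuilder()

	// A map-backed attribute becomes a map subscript:
	//   attributes_string['user.id']
	name, _ := cb.GetTableFieldName(ctx, &telemetrytypes.TelemetryFieldKey{
		Name:          "user.id",
		FieldContext:  telemetrytypes.FieldContextAttribute,
		FieldDataType: telemetrytypes.FieldDataTypeString,
	})

	// The same key flagged as materialized resolves to its dedicated column,
	// e.g. attribute_string_user$$id, via FieldKeyToMaterializedColumnName.
	matName, _ := cb.GetTableFieldName(ctx, &telemetrytypes.TelemetryFieldKey{
		Name:          "user.id",
		FieldContext:  telemetrytypes.FieldContextAttribute,
		FieldDataType: telemetrytypes.FieldDataTypeString,
		Materialized:  true,
	})
	_, _ = name, matName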
298
pkg/telemetrytraces/condition_builder_test.go
Normal file
@@ -0,0 +1,298 @@
package telemetrytraces

import (
	"context"
	"testing"

	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestGetFieldKeyName(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := &conditionBuilder{}

	testCases := []struct {
		name           string
		key            telemetrytypes.TelemetryFieldKey
		expectedResult string
		expectedError  error
	}{
		{
			name: "Simple column type - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			expectedResult: "timestamp",
			expectedError:  nil,
		},
		{
			name: "Map column type - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			expectedResult: "attributes_string['user.id']",
			expectedError:  nil,
		},
		{
			name: "Map column type - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			expectedResult: "attributes_number['request.size']",
			expectedError:  nil,
		},
		{
			name: "Map column type - bool attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.success",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			expectedResult: "attributes_bool['request.success']",
			expectedError:  nil,
		},
		{
			name: "Map column type - resource attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "service.name",
				FieldContext: telemetrytypes.FieldContextResource,
			},
			expectedResult: "resources_string['service.name']",
			expectedError:  nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			expectedResult: "",
			expectedError:  qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result, err := conditionBuilder.GetTableFieldName(ctx, &tc.key)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				assert.Equal(t, tc.expectedResult, result)
			}
		})
	}
}

func TestGetCondition(t *testing.T) {
	ctx := context.Background()
	conditionBuilder := NewConditionBuilder()

	testCases := []struct {
		name          string
		key           telemetrytypes.TelemetryFieldKey
		operator      qbtypes.FilterOperator
		value         any
		expectedSQL   string
		expectedError error
	}{
		{
			name: "Not Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorNotEqual,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <> ?",
			expectedError: nil,
		},
		{
			name: "Greater Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.duration",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorGreaterThan,
			value:         float64(100),
			expectedSQL:   "attributes_number['request.duration'] > ?",
			expectedError: nil,
		},
		{
			name: "Less Than operator - number attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "request.size",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeNumber,
			},
			operator:      qbtypes.FilterOperatorLessThan,
			value:         float64(1024),
			expectedSQL:   "attributes_number['request.size'] < ?",
			expectedError: nil,
		},
		{
			name: "Greater Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorGreaterThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp >= ?",
			expectedError: nil,
		},
		{
			name: "Less Than Or Equal operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorLessThanOrEq,
			value:         uint64(1617979338000000000),
			expectedSQL:   "timestamp <= ?",
			expectedError: nil,
		},
		{
			name: "ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Not ILike operator - string attribute",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotILike,
			value:         "%admin%",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) NOT LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "Between operator - invalid value",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         "invalid",
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Between operator - insufficient values",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorBetween,
			value:         []any{uint64(1617979338000000000)},
			expectedSQL:   "",
			expectedError: qbtypes.ErrBetweenValues,
		},
		{
			name: "Not Between operator - timestamp",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "timestamp",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorNotBetween,
			value:         []any{uint64(1617979338000000000), uint64(1617979348000000000)},
			expectedSQL:   "timestamp NOT BETWEEN ? AND ?",
			expectedError: nil,
		},
		{
			name: "Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') = ?",
			expectedError: nil,
		},
		{
			name: "Not Exists operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorNotExists,
			value:         nil,
			expectedSQL:   "mapContains(attributes_string, 'user.id') <> ?",
			expectedError: nil,
		},
		{
			name: "Contains operator - map field",
			key: telemetrytypes.TelemetryFieldKey{
				Name:          "user.id",
				FieldContext:  telemetrytypes.FieldContextAttribute,
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			operator:      qbtypes.FilterOperatorContains,
			value:         "admin",
			expectedSQL:   "WHERE LOWER(attributes_string['user.id']) LIKE LOWER(?)",
			expectedError: nil,
		},
		{
			name: "Non-existent column",
			key: telemetrytypes.TelemetryFieldKey{
				Name:         "nonexistent_field",
				FieldContext: telemetrytypes.FieldContextSpan,
			},
			operator:      qbtypes.FilterOperatorEqual,
			value:         "value",
			expectedSQL:   "",
			expectedError: qbtypes.ErrColumnNotFound,
		},
	}

	for _, tc := range testCases {
		sb := sqlbuilder.NewSelectBuilder()
		t.Run(tc.name, func(t *testing.T) {
			cond, err := conditionBuilder.GetCondition(ctx, &tc.key, tc.operator, tc.value, sb)
			sb.Where(cond)

			if tc.expectedError != nil {
				assert.Equal(t, tc.expectedError, err)
			} else {
				require.NoError(t, err)
				sql, _ := sb.BuildWithFlavor(sqlbuilder.ClickHouse)
				assert.Contains(t, sql, tc.expectedSQL)
			}
		})
	}
}
10
pkg/telemetrytraces/tables.go
Normal file
@@ -0,0 +1,10 @@
package telemetrytraces

const (
	DBName                        = "signoz_traces"
	SpanIndexV3TableName          = "distributed_signoz_index_v3"
	SpanIndexV3LocalTableName     = "signoz_index_v3"
	TagAttributesV2TableName      = "distributed_tag_attributes_v2"
	TagAttributesV2LocalTableName = "tag_attributes_v2"
	TopLevelOperationsTableName   = "distributed_top_level_operations"
)
@@ -1,37 +1,246 @@
package types

import (
	"database/sql/driver"
	"encoding/json"
	"fmt"
	"time"

	"github.com/pkg/errors"
	"github.com/uptrace/bun"
)

-type Integration struct {
-	bun.BaseModel `bun:"table:integrations_installed"`
type IntegrationUserEmail string

-	IntegrationID string    `bun:"integration_id,pk,type:text"`
-	ConfigJSON    string    `bun:"config_json,type:text"`
-	InstalledAt   time.Time `bun:"installed_at,default:current_timestamp"`
const (
	AWSIntegrationUserEmail IntegrationUserEmail = "aws-integration@signoz.io"
)

var AllIntegrationUserEmails = []IntegrationUserEmail{
	AWSIntegrationUserEmail,
}

-type CloudIntegrationAccount struct {
-	bun.BaseModel `bun:"table:cloud_integrations_accounts"`
// --------------------------------------------------------------------------
// Normal integration uses just the installed_integration table
// --------------------------------------------------------------------------

-	CloudProvider       string    `bun:"cloud_provider,type:text,unique:cloud_provider_id"`
-	ID                  string    `bun:"id,type:text,notnull,unique:cloud_provider_id"`
-	ConfigJSON          string    `bun:"config_json,type:text"`
-	CloudAccountID      string    `bun:"cloud_account_id,type:text"`
-	LastAgentReportJSON string    `bun:"last_agent_report_json,type:text"`
-	CreatedAt           time.Time `bun:"created_at,notnull,default:current_timestamp"`
-	RemovedAt           time.Time `bun:"removed_at,type:timestamp"`
type InstalledIntegration struct {
	bun.BaseModel `bun:"table:installed_integration"`

	Identifiable
	Type        string                     `json:"type" bun:"type,type:text,unique:org_id_type"`
	Config      InstalledIntegrationConfig `json:"config" bun:"config,type:text"`
	InstalledAt time.Time                  `json:"installed_at" bun:"installed_at,default:current_timestamp"`
	OrgID       string                     `json:"org_id" bun:"org_id,type:text,unique:org_id_type,references:organizations(id),on_delete:cascade"`
}

-type CloudIntegrationServiceConfig struct {
-	bun.BaseModel `bun:"table:cloud_integrations_service_configs"`
type InstalledIntegrationConfig map[string]interface{}

-	CloudProvider  string    `bun:"cloud_provider,type:text,notnull,unique:service_cloud_provider_account"`
-	CloudAccountID string    `bun:"cloud_account_id,type:text,notnull,unique:service_cloud_provider_account"`
-	ServiceID      string    `bun:"service_id,type:text,notnull,unique:service_cloud_provider_account"`
-	ConfigJSON     string    `bun:"config_json,type:text"`
-	CreatedAt      time.Time `bun:"created_at,default:current_timestamp"`
// For serializing from db
func (c *InstalledIntegrationConfig) Scan(src interface{}) error {
	var data []byte
	switch v := src.(type) {
	case []byte:
		data = v
	case string:
		data = []byte(v)
	default:
		return fmt.Errorf("tried to scan from %T instead of string or bytes", src)
	}

	return json.Unmarshal(data, c)
}

// For serializing to db
func (c *InstalledIntegrationConfig) Value() (driver.Value, error) {
	filterSetJson, err := json.Marshal(c)
	if err != nil {
		return nil, errors.Wrap(err, "could not serialize integration config to JSON")
	}
	return filterSetJson, nil
}

// --------------------------------------------------------------------------
// Cloud integration uses the cloud_integration table
// and cloud_integrations_service table
// --------------------------------------------------------------------------

type CloudIntegration struct {
	bun.BaseModel `bun:"table:cloud_integration"`

	Identifiable
	TimeAuditable
	Provider        string         `json:"provider" bun:"provider,type:text,unique:provider_id"`
	Config          *AccountConfig `json:"config" bun:"config,type:text"`
	AccountID       *string        `json:"account_id" bun:"account_id,type:text"`
	LastAgentReport *AgentReport   `json:"last_agent_report" bun:"last_agent_report,type:text"`
	RemovedAt       *time.Time     `json:"removed_at" bun:"removed_at,type:timestamp,nullzero"`
	OrgID           string         `bun:"org_id,type:text,unique:provider_id"`
}

func (a *CloudIntegration) Status() AccountStatus {
	status := AccountStatus{}
	if a.LastAgentReport != nil {
		lastHeartbeat := a.LastAgentReport.TimestampMillis
		status.Integration.LastHeartbeatTsMillis = &lastHeartbeat
	}
	return status
}

func (a *CloudIntegration) Account() Account {
	ca := Account{Id: a.ID.StringValue(), Status: a.Status()}

	if a.AccountID != nil {
		ca.CloudAccountId = *a.AccountID
	}

	if a.Config != nil {
		ca.Config = *a.Config
	} else {
		ca.Config = DefaultAccountConfig()
	}
	return ca
}

type Account struct {
	Id             string        `json:"id"`
	CloudAccountId string        `json:"cloud_account_id"`
	Config         AccountConfig `json:"config"`
	Status         AccountStatus `json:"status"`
}

type AccountStatus struct {
	Integration AccountIntegrationStatus `json:"integration"`
}

type AccountIntegrationStatus struct {
	LastHeartbeatTsMillis *int64 `json:"last_heartbeat_ts_ms"`
}

func DefaultAccountConfig() AccountConfig {
	return AccountConfig{
		EnabledRegions: []string{},
	}
}

type AccountConfig struct {
	EnabledRegions []string `json:"regions"`
}

// For serializing from db
func (c *AccountConfig) Scan(src any) error {
	var data []byte
	switch v := src.(type) {
	case []byte:
		data = v
	case string:
		data = []byte(v)
	default:
		return fmt.Errorf("tried to scan from %T instead of string or bytes", src)
	}

	return json.Unmarshal(data, c)
}

// For serializing to db
func (c *AccountConfig) Value() (driver.Value, error) {
	if c == nil {
		return nil, nil
	}

	serialized, err := json.Marshal(c)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't serialize cloud account config to JSON: %w", err,
		)
	}
	return serialized, nil
}

type AgentReport struct {
	TimestampMillis int64          `json:"timestamp_millis"`
	Data            map[string]any `json:"data"`
}

// For serializing from db
func (r *AgentReport) Scan(src any) error {
	var data []byte
	switch v := src.(type) {
	case []byte:
		data = v
	case string:
		data = []byte(v)
	default:
		return fmt.Errorf("tried to scan from %T instead of string or bytes", src)
	}

	return json.Unmarshal(data, r)
}

// For serializing to db
func (r *AgentReport) Value() (driver.Value, error) {
	if r == nil {
		return nil, nil
	}

	serialized, err := json.Marshal(r)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't serialize agent report to JSON: %w", err,
		)
	}
	return serialized, nil
}

type CloudIntegrationService struct {
	bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`

	Identifiable
	TimeAuditable
	Type               string             `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
	Config             CloudServiceConfig `bun:"config,type:text"`
	CloudIntegrationID string             `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type,references:cloud_integrations(id),on_delete:cascade"`
}

type CloudServiceLogsConfig struct {
	Enabled bool `json:"enabled"`
}

type CloudServiceMetricsConfig struct {
	Enabled bool `json:"enabled"`
}

type CloudServiceConfig struct {
	Logs    *CloudServiceLogsConfig    `json:"logs,omitempty"`
	Metrics *CloudServiceMetricsConfig `json:"metrics,omitempty"`
}

// For serializing from db
func (c *CloudServiceConfig) Scan(src any) error {
	var data []byte
	switch src := src.(type) {
	case []byte:
		data = src
	case string:
		data = []byte(src)
	default:
		return fmt.Errorf("tried to scan from %T instead of string or bytes", src)
	}

	return json.Unmarshal(data, c)
}

// For serializing to db
func (c *CloudServiceConfig) Value() (driver.Value, error) {
	if c == nil {
		return nil, nil
	}

	serialized, err := json.Marshal(c)
	if err != nil {
		return nil, fmt.Errorf(
			"couldn't serialize cloud service config to JSON: %w", err,
		)
	}
	return serialized, nil
}
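The new config types all follow the same persistence pattern: Value marshals the struct to JSON for storage in a text column, and Scan accepts either []byte or string coming back from the driver. A minimal round-trip sketch under that assumption (variable names are illustrative):

	// Hypothetical round-trip through the driver.Valuer / sql.Scanner pair.
	cfg := &CloudServiceConfig{
		Logs:    &CloudServiceLogsConfig{Enabled: true},
		Metrics: &CloudServiceMetricsConfig{Enabled: false},
	}
	raw, err := cfg.Value() // JSON bytes, e.g. {"logs":{"enabled":true},"metrics":{"enabled":false}}
	if err != nil {
		log.Fatal(err)
	}

	var decoded CloudServiceConfig
	if err := decoded.Scan(raw); err != nil { // a string input would work too
		log.Fatal(err)
	}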
61
pkg/types/querybuildertypes/querybuildertypesv5/qb.go
Normal file
@@ -0,0 +1,61 @@
package types

import (
	"context"

	schema "github.com/SigNoz/signoz-otel-collector/cmd/signozschemamigrator/schema_migrator"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

var (
	ErrColumnNotFound = errors.Newf(errors.TypeNotFound, errors.CodeNotFound, "column not found")
	ErrBetweenValues  = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) between operator requires two values")
	ErrInValues       = errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "(not) in operator requires a list of values")
)

// FilterOperator is the operator for the filter.
type FilterOperator int

const (
	FilterOperatorUnknown FilterOperator = iota
	FilterOperatorEqual
	FilterOperatorNotEqual
	FilterOperatorGreaterThan
	FilterOperatorGreaterThanOrEq
	FilterOperatorLessThan
	FilterOperatorLessThanOrEq

	FilterOperatorLike
	FilterOperatorNotLike
	FilterOperatorILike
	FilterOperatorNotILike

	FilterOperatorBetween
	FilterOperatorNotBetween

	FilterOperatorIn
	FilterOperatorNotIn

	FilterOperatorExists
	FilterOperatorNotExists

	FilterOperatorRegexp
	FilterOperatorNotRegexp

	FilterOperatorContains
	FilterOperatorNotContains
)

// ConditionBuilder is the interface for building the condition part of the query.
type ConditionBuilder interface {
	// GetColumn returns the column for the given key.
	GetColumn(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (*schema.Column, error)

	// GetTableFieldName returns the table field name for the given key.
	GetTableFieldName(ctx context.Context, key *telemetrytypes.TelemetryFieldKey) (string, error)

	// GetCondition returns the condition for the given key, operator and value.
	GetCondition(ctx context.Context, key *telemetrytypes.TelemetryFieldKey, operator FilterOperator, value any, sb *sqlbuilder.SelectBuilder) (string, error)
}
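Callers that accept user input need to validate value shapes before reaching a ConditionBuilder: BETWEEN wants exactly two values and IN wants a list, as the sentinel errors above encode. A small hedged helper illustrating that contract (not part of the package):

	// Hypothetical pre-validation mirroring ErrBetweenValues / ErrInValues.
	func validateOperatorValue(op FilterOperator, value any) error {
		switch op {
		case FilterOperatorBetween, FilterOperatorNotBetween:
			vals, ok := value.([]any)
			if !ok || len(vals) != 2 {
				return ErrBetweenValues
			}
		case FilterOperatorIn, FilterOperatorNotIn:
			if _, ok := value.([]any); !ok {
				return ErrInValues
			}
		}
		return nil
	}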
174
pkg/types/telemetrytypes/field.go
Normal file
@@ -0,0 +1,174 @@
package telemetrytypes

import (
	"fmt"
	"strings"

	"github.com/SigNoz/signoz/pkg/valuer"
)

// FieldSelectorMatchType is the match type of the field key selector.
type FieldSelectorMatchType struct {
	valuer.String
}

var (
	FieldSelectorMatchTypeExact = FieldSelectorMatchType{valuer.NewString("exact")}
	FieldSelectorMatchTypeFuzzy = FieldSelectorMatchType{valuer.NewString("fuzzy")}
)

type TelemetryFieldKey struct {
	Name          string        `json:"name"`
	Description   string        `json:"description,omitempty"`
	Unit          string        `json:"unit,omitempty"`
	Signal        Signal        `json:"signal,omitempty"`
	FieldContext  FieldContext  `json:"fieldContext,omitempty"`
	FieldDataType FieldDataType `json:"fieldDataType,omitempty"`
	Materialized  bool          `json:"materialized,omitempty"`
}

// GetFieldKeyFromKeyText returns a TelemetryFieldKey from a key text.
// The key text is expected to be in the format of `fieldContext.fieldName:fieldDataType` in the search query.
func GetFieldKeyFromKeyText(key string) TelemetryFieldKey {

	keyTextParts := strings.Split(key, ".")

	var explicitFieldContextProvided, explicitFieldDataTypeProvided bool
	var explicitFieldContext FieldContext
	var explicitFieldDataType FieldDataType
	var ok bool

	if len(keyTextParts) > 1 {
		explicitFieldContext, ok = fieldContexts[keyTextParts[0]]
		if ok && explicitFieldContext != FieldContextUnspecified {
			explicitFieldContextProvided = true
		}
	}

	if explicitFieldContextProvided {
		keyTextParts = keyTextParts[1:]
	}

	// check if there is a field data type provided
	if len(keyTextParts) >= 1 {
		lastPart := keyTextParts[len(keyTextParts)-1]
		lastPartParts := strings.Split(lastPart, ":")
		if len(lastPartParts) > 1 {
			explicitFieldDataType, ok = fieldDataTypes[lastPartParts[1]]
			if ok && explicitFieldDataType != FieldDataTypeUnspecified {
				explicitFieldDataTypeProvided = true
			}
		}

		if explicitFieldDataTypeProvided {
			keyTextParts[len(keyTextParts)-1] = lastPartParts[0]
		}
	}

	realKey := strings.Join(keyTextParts, ".")

	fieldKeySelector := TelemetryFieldKey{
		Name: realKey,
	}

	if explicitFieldContextProvided {
		fieldKeySelector.FieldContext = explicitFieldContext
	} else {
		fieldKeySelector.FieldContext = FieldContextUnspecified
	}

	if explicitFieldDataTypeProvided {
		fieldKeySelector.FieldDataType = explicitFieldDataType
	} else {
		fieldKeySelector.FieldDataType = FieldDataTypeUnspecified
	}

	return fieldKeySelector
}

func FieldKeyToMaterializedColumnName(key *TelemetryFieldKey) string {
	return fmt.Sprintf("%s_%s_%s", key.FieldContext, key.FieldDataType.String, strings.ReplaceAll(key.Name, ".", "$$"))
}

func FieldKeyToMaterializedColumnNameForExists(key *TelemetryFieldKey) string {
	return fmt.Sprintf("%s_%s_%s_exists", key.FieldContext, key.FieldDataType.String, strings.ReplaceAll(key.Name, ".", "$$"))
}

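Putting the parsing and the column-name mapping together: a key typed in a search query round-trips from dotted text to a materialized column identifier. A hedged example of the expected values, following the definitions above:

	// Hypothetical walkthrough, not part of the package.
	key := GetFieldKeyFromKeyText("resource.k8s.pod.name:string")
	// key.Name          == "k8s.pod.name"
	// key.FieldContext  == FieldContextResource
	// key.FieldDataType == FieldDataTypeString

	key.Materialized = true
	col := FieldKeyToMaterializedColumnName(&key)
	// col == "resource_string_k8s$$pod$$name", matching the DDL columns
	// seen in the logs table statement earlier in this diff.
	_ = col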
type TelemetryFieldValues struct {
	StringValues  []string  `json:"stringValues,omitempty"`
	BoolValues    []bool    `json:"boolValues,omitempty"`
	NumberValues  []float64 `json:"numberValues,omitempty"`
	RelatedValues []string  `json:"relatedValues,omitempty"`
}

type MetricContext struct {
	MetricName string `json:"metricName"`
}

type FieldKeySelector struct {
	StartUnixMilli    int64                  `json:"startUnixMilli"`
	EndUnixMilli      int64                  `json:"endUnixMilli"`
	Signal            Signal                 `json:"signal"`
	FieldContext      FieldContext           `json:"fieldContext"`
	FieldDataType     FieldDataType          `json:"fieldDataType"`
	Name              string                 `json:"name"`
	SelectorMatchType FieldSelectorMatchType `json:"selectorMatchType"`
	Limit             int                    `json:"limit"`
	MetricContext     *MetricContext         `json:"metricContext,omitempty"`
}

type FieldValueSelector struct {
	FieldKeySelector
	ExistingQuery string `json:"existingQuery"`
	Value         string `json:"value"`
	Limit         int    `json:"limit"`
}

func DataTypeCollisionHandledFieldName(key *TelemetryFieldKey, value any, tblFieldName string) (string, any) {
	// This block of code exists to handle data type collisions.
	// We don't want to fail requests when a key has more than one data type.
	// Take `http.status_code` as an example, and suppose users sent both a string value and a number value.
	// When they search for `http.status_code=200`, we will search across both the number and string columns
	// and return the results from both.
	// While we expect users not to send mixed data types, it inevitably happens,
	// so we handle the data type collisions here.
	switch key.FieldDataType {
	case FieldDataTypeString:
		switch value.(type) {
		case float64:
			// the value is a number, so parse the string column as a number for comparison
			tblFieldName = fmt.Sprintf(`toFloat64OrNull(%s)`, tblFieldName)
		case []any:
			areFloats := true
			for _, v := range value.([]any) {
				if _, ok := v.(float64); !ok {
					areFloats = false
					break
				}
			}
			if areFloats {
				tblFieldName = fmt.Sprintf(`toFloat64OrNull(%s)`, tblFieldName)
			}
		case bool:
			// we don't have a toBoolOrNull in ClickHouse, so we need to convert the bool to a string
			value = fmt.Sprintf("%t", value)
		case string:
			// nothing to do
		}
	case FieldDataTypeFloat64, FieldDataTypeInt64, FieldDataTypeNumber:
		switch value.(type) {
		case string:
			// the value is a string, so compare against the stringified number column
			tblFieldName = fmt.Sprintf(`toString(%s)`, tblFieldName)
		case float64:
			// nothing to do
		}
	case FieldDataTypeBool:
		switch value.(type) {
		case string:
			// the value is a string, so compare against the stringified bool column
			tblFieldName = fmt.Sprintf(`toString(%s)`, tblFieldName)
		}
	}
	return tblFieldName, value
}
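Concretely, the collision handling only rewrites the column expression when the stored type and the queried value disagree. A hedged illustration of the expected rewrites, following the switch above:

	// Hypothetical walkthrough, not part of the package.
	key := &TelemetryFieldKey{
		Name:          "http.status_code",
		FieldContext:  FieldContextAttribute,
		FieldDataType: FieldDataTypeString,
	}

	// String column compared against a number: parse the column.
	field, val := DataTypeCollisionHandledFieldName(key, float64(200), "attributes_string['http.status_code']")
	// field == "toFloat64OrNull(attributes_string['http.status_code'])", val == 200.0

	// String column compared against a string: left untouched.
	field, val = DataTypeCollisionHandledFieldName(key, "200", "attributes_string['http.status_code']")
	// field == "attributes_string['http.status_code']", val == "200"
	_, _ = field, val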
148
pkg/types/telemetrytypes/field_context.go
Normal file
@@ -0,0 +1,148 @@
package telemetrytypes

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/SigNoz/signoz/pkg/valuer"
)

// FieldContext is the context of the field being queried. It is used to
// disambiguate between different contexts of the same field.
//
// - Use the `resource.` prefix to explicitly indicate and enforce resource context. Example
//   - `resource.service.name`
//   - `resource.k8s.namespace.name`
//
// - Use the `scope.` prefix to explicitly indicate and enforce scope context. Example
//   - `scope.name`
//   - `scope.version`
//   - `scope.my.custom.attribute` and `scope.attribute.my.custom.attribute` resolve to the same attribute
//
// - Use `attribute.` to explicitly indicate and enforce attribute context. Example
//   - `attribute.http.method`
//   - `attribute.http.target`
//
// - Use `event.` to indicate and enforce event context, and `event.attribute.` to disambiguate between `event.name` and `event.attribute.name`. Examples
//   - `event.name` will look for the event name
//   - `event.record_entry` will look for the `record_entry` attribute of the event
//   - `event.attribute.name` will look for the `name` attribute of the event
//
// - Use `span.` to indicate the span context.
//   - `span.name` will resolve to the name of the span
//   - `span.kind` will resolve to the kind of the span
//   - `span.http.method` will resolve to the `http.method` attribute
//
// - Use `log.` for explicit log context.
//   - `log.severity_text` will always resolve to `severity_text` of the log record
type FieldContext struct {
	valuer.String
}

var (
	FieldContextMetric      = FieldContext{valuer.NewString("metric")}
	FieldContextLog         = FieldContext{valuer.NewString("log")}
	FieldContextSpan        = FieldContext{valuer.NewString("span")}
	FieldContextTrace       = FieldContext{valuer.NewString("trace")}
	FieldContextResource    = FieldContext{valuer.NewString("resource")}
	FieldContextScope       = FieldContext{valuer.NewString("scope")}
	FieldContextAttribute   = FieldContext{valuer.NewString("attribute")}
	FieldContextEvent       = FieldContext{valuer.NewString("event")}
	FieldContextUnspecified = FieldContext{valuer.NewString("")}

	// Map string representations to FieldContext values.
	// We wouldn't need this if not for the fact that we have historically used
	// "tag" and "attribute" interchangeably. This means that elsewhere in the
	// system we have used "tag" to refer to "attribute", and there are DB
	// entries that use the two interchangeably. This is a stop-gap measure to
	// ensure that we can still use the existing DB entries.
	fieldContexts = map[string]FieldContext{
		"resource":   FieldContextResource,
		"scope":      FieldContextScope,
		"tag":        FieldContextAttribute,
		"attribute":  FieldContextAttribute,
		"event":      FieldContextEvent,
		"spanfield":  FieldContextSpan,
		"span":       FieldContextSpan,
		"logfield":   FieldContextLog,
		"log":        FieldContextLog,
		"metric":     FieldContextMetric,
		"tracefield": FieldContextTrace,
	}
)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (f *FieldContext) UnmarshalJSON(data []byte) error {
	var str string
	if err := json.Unmarshal(data, &str); err != nil {
		return err
	}

	// Normalize the string.
	normalizedStr := strings.ToLower(strings.TrimSpace(str))

	// Look up the context in our map.
	if ctx, exists := fieldContexts[normalizedStr]; exists {
		*f = ctx
		return nil
	}

	// Default to unspecified if not found.
	*f = FieldContextUnspecified
	return nil
}

// Scan implements the sql.Scanner interface.
func (f *FieldContext) Scan(value interface{}) error {
	if f == nil {
		return fmt.Errorf("fieldcontext: nil receiver")
	}

	if value == nil {
		*f = FieldContextUnspecified
		return nil
	}

	str, ok := value.(string)
	if !ok {
		return fmt.Errorf("fieldcontext: expected string, got %T", value)
	}

	// Normalize the string.
	normalizedStr := strings.ToLower(strings.TrimSpace(str))

	// Look up the context in our map.
	if ctx, exists := fieldContexts[normalizedStr]; exists {
		*f = ctx
		return nil
	}

	// Default to unspecified if not found.
	*f = FieldContextUnspecified
	return nil
}

// TagType returns the tag type for the field context.
func (f FieldContext) TagType() string {
	switch f {
	case FieldContextResource:
		return "resource"
	case FieldContextScope:
		return "scope"
	case FieldContextAttribute:
		return "tag"
	case FieldContextLog:
		return "logfield"
	case FieldContextSpan:
		return "spanfield"
	case FieldContextTrace:
		return "tracefield"
	case FieldContextMetric:
		return "metricfield"
	case FieldContextEvent:
		return "eventfield"
	}
	return ""
}
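The legacy "tag" alias above is the subtlety worth pinning down in review. A minimal sketch of a test exercising it, assuming only the declarations above; the test name is hypothetical and the test is illustrative, not part of this diff:

package telemetrytypes

import (
	"encoding/json"
	"testing"
)

// Illustrative only: "tag" (the historical spelling) and "attribute" must
// decode to the same context, and normalization must handle case and space.
func TestFieldContextTagAliasSketch(t *testing.T) {
	var legacy, current FieldContext
	if err := json.Unmarshal([]byte(`" Tag "`), &legacy); err != nil {
		t.Fatal(err)
	}
	if err := json.Unmarshal([]byte(`"attribute"`), &current); err != nil {
		t.Fatal(err)
	}
	if legacy != FieldContextAttribute || legacy != current {
		t.Errorf("expected both to resolve to attribute, got %v and %v", legacy, current)
	}
}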
121
pkg/types/telemetrytypes/field_datatype.go
Normal file
@@ -0,0 +1,121 @@
package telemetrytypes

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/SigNoz/signoz/pkg/valuer"
)

// FieldDataType is the data type of the field. It is used to disambiguate
// between different data types of the same field.
type FieldDataType struct {
	valuer.String
}

var (
	FieldDataTypeString  = FieldDataType{valuer.NewString("string")}
	FieldDataTypeBool    = FieldDataType{valuer.NewString("bool")}
	FieldDataTypeFloat64 = FieldDataType{valuer.NewString("float64")}
	// int64 and number are synonyms for float64
	FieldDataTypeInt64       = FieldDataType{valuer.NewString("int64")}
	FieldDataTypeNumber      = FieldDataType{valuer.NewString("number")}
	FieldDataTypeUnspecified = FieldDataType{valuer.NewString("")}

	// Map string representations to FieldDataType values.
	// We want to handle all the possible string representations of the data
	// types. Even if the user uses some non-standard representation, we want
	// to be able to parse it correctly.
	fieldDataTypes = map[string]FieldDataType{
		// String types
		"string": FieldDataTypeString,

		// Boolean types
		"bool": FieldDataTypeBool,

		// Integer types
		"int":    FieldDataTypeNumber,
		"int8":   FieldDataTypeNumber,
		"int16":  FieldDataTypeNumber,
		"int32":  FieldDataTypeNumber,
		"int64":  FieldDataTypeNumber,
		"uint":   FieldDataTypeNumber,
		"uint8":  FieldDataTypeNumber,
		"uint16": FieldDataTypeNumber,
		"uint32": FieldDataTypeNumber,
		"uint64": FieldDataTypeNumber,

		// Float types
		"float":   FieldDataTypeNumber,
		"float32": FieldDataTypeNumber,
		"float64": FieldDataTypeNumber,
		"double":  FieldDataTypeNumber,
		"decimal": FieldDataTypeNumber,
		"number":  FieldDataTypeNumber,
	}
)

// UnmarshalJSON implements the json.Unmarshaler interface.
func (f *FieldDataType) UnmarshalJSON(data []byte) error {
	var str string
	if err := json.Unmarshal(data, &str); err != nil {
		return err
	}

	// Normalize the string.
	normalizedStr := strings.ToLower(strings.TrimSpace(str))

	// Look up the data type in our map.
	if dataType, exists := fieldDataTypes[normalizedStr]; exists {
		*f = dataType
		return nil
	}

	// Default to unspecified if not found.
	*f = FieldDataTypeUnspecified
	return nil
}

// Scan implements the sql.Scanner interface.
func (f *FieldDataType) Scan(value interface{}) error {
	if f == nil {
		return fmt.Errorf("fielddatatype: nil receiver")
	}

	if value == nil {
		*f = FieldDataTypeUnspecified
		return nil
	}

	str, ok := value.(string)
	if !ok {
		return fmt.Errorf("fielddatatype: expected string, got %T", value)
	}

	// Normalize the string.
	normalizedStr := strings.ToLower(strings.TrimSpace(str))

	// Look up the data type in our map.
	if dataType, exists := fieldDataTypes[normalizedStr]; exists {
		*f = dataType
		return nil
	}

	// Default to unspecified if not found.
	*f = FieldDataTypeUnspecified
	return nil
}

// TagDataType returns the legacy tag data type for the field data type.
func (f FieldDataType) TagDataType() string {
	switch f {
	case FieldDataTypeString:
		return "string"
	case FieldDataTypeBool:
		return "bool"
	case FieldDataTypeNumber, FieldDataTypeInt64, FieldDataTypeFloat64:
		return "float64"
	default:
		return "string"
	}
}
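Every integer and float spelling collapses to FieldDataTypeNumber, and TagDataType then maps that back to the legacy "float64" string. A round-trip sketch of that behavior, using only the declarations above; the test name is hypothetical and the test is not part of this diff:

package telemetrytypes

import "testing"

// Illustrative only: numeric spellings normalize (trim + lowercase) and
// collapse to FieldDataTypeNumber, which TagDataType reports as "float64".
func TestFieldDataTypeNumericSynonymsSketch(t *testing.T) {
	var dt FieldDataType
	for _, s := range []string{"int64", "uint32", "double", " Float64 "} {
		if err := dt.Scan(s); err != nil {
			t.Fatal(err)
		}
		if dt != FieldDataTypeNumber {
			t.Errorf("Scan(%q): expected number, got %v", s, dt)
		}
		if got := dt.TagDataType(); got != "float64" {
			t.Errorf("TagDataType() after Scan(%q): expected \"float64\", got %q", s, got)
		}
	}
}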
93
pkg/types/telemetrytypes/field_test.go
Normal file
@@ -0,0 +1,93 @@
package telemetrytypes

import (
	"testing"
)

func TestGetFieldKeyFromKeyText(t *testing.T) {
	testCases := []struct {
		keyText  string
		expected TelemetryFieldKey
	}{
		{
			keyText: "resource.service.name:string",
			expected: TelemetryFieldKey{
				Name:          "service.name",
				FieldContext:  FieldContextResource,
				FieldDataType: FieldDataTypeString,
			},
		},
		{
			keyText: "scope.name",
			expected: TelemetryFieldKey{
				Name:          "name",
				FieldContext:  FieldContextScope,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		{
			keyText: "scope.version",
			expected: TelemetryFieldKey{
				Name:          "version",
				FieldContext:  FieldContextScope,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		{
			keyText: "attribute.http.method",
			expected: TelemetryFieldKey{
				Name:          "http.method",
				FieldContext:  FieldContextAttribute,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		{
			keyText: "span.name",
			expected: TelemetryFieldKey{
				Name:          "name",
				FieldContext:  FieldContextSpan,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		{
			keyText: "span.kind:string",
			expected: TelemetryFieldKey{
				Name:          "kind",
				FieldContext:  FieldContextSpan,
				FieldDataType: FieldDataTypeString,
			},
		},
		{
			keyText: "span.kind:int",
			expected: TelemetryFieldKey{
				Name:          "kind",
				FieldContext:  FieldContextSpan,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		{
			keyText: "span.http.status_code:int",
			expected: TelemetryFieldKey{
				Name:          "http.status_code",
				FieldContext:  FieldContextSpan,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		{
			keyText: "log.severity_text",
			expected: TelemetryFieldKey{
				Name:          "severity_text",
				FieldContext:  FieldContextLog,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
	}

	for _, testCase := range testCases {
		result := GetFieldKeyFromKeyText(testCase.keyText)
		if result != testCase.expected {
			t.Errorf("expected %v, got %v", testCase.expected, result)
		}
	}
}
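The cases above pin down a `context.name[:datatype]` grammar. GetFieldKeyFromKeyText itself is defined elsewhere in this change set; the following is a minimal sketch of the parsing behavior the tests imply, covering only the cases exercised above. `parseKeyTextSketch` is a hypothetical name, not the real implementation:

package telemetrytypes

import "strings"

// Sketch only: reconstructs the parsing the tests above expect. It leans on
// the fieldContexts and fieldDataTypes maps defined earlier in this package.
func parseKeyTextSketch(keyText string) TelemetryFieldKey {
	key := TelemetryFieldKey{
		FieldContext:  FieldContextUnspecified,
		FieldDataType: FieldDataTypeUnspecified,
	}

	// Optional ":datatype" suffix, e.g. "span.kind:int".
	if idx := strings.LastIndex(keyText, ":"); idx != -1 {
		if dt, ok := fieldDataTypes[strings.ToLower(keyText[idx+1:])]; ok {
			key.FieldDataType = dt
		}
		keyText = keyText[:idx]
	}

	// Optional "context." prefix, e.g. "resource.service.name".
	if idx := strings.Index(keyText, "."); idx != -1 {
		if ctx, ok := fieldContexts[keyText[:idx]]; ok {
			key.FieldContext = ctx
			keyText = keyText[idx+1:]
		}
	}

	key.Name = keyText
	return key
}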
14
pkg/types/telemetrytypes/signal.go
Normal file
@@ -0,0 +1,14 @@
package telemetrytypes

import "github.com/SigNoz/signoz/pkg/valuer"

type Signal struct {
	valuer.String
}

var (
	SignalTraces      = Signal{valuer.NewString("traces")}
	SignalLogs        = Signal{valuer.NewString("logs")}
	SignalMetrics     = Signal{valuer.NewString("metrics")}
	SignalUnspecified = Signal{valuer.NewString("")}
)
25
pkg/types/telemetrytypes/store.go
Normal file
@@ -0,0 +1,25 @@
package telemetrytypes

import (
	"context"
)

// MetadataStore is the interface for the telemetry metadata store.
type MetadataStore interface {
	// GetKeys returns a map of field keys (TelemetryFieldKey) by name. There
	// can be multiple keys with the same name if they have different field
	// contexts or data types.
	GetKeys(ctx context.Context, fieldKeySelector *FieldKeySelector) (map[string][]*TelemetryFieldKey, error)

	// GetKeysMulti is GetKeys but with any number of fieldKeySelectors.
	GetKeysMulti(ctx context.Context, fieldKeySelectors []*FieldKeySelector) (map[string][]*TelemetryFieldKey, error)

	// GetKey returns a list of keys with the given name.
	GetKey(ctx context.Context, fieldKeySelector *FieldKeySelector) ([]*TelemetryFieldKey, error)

	// GetRelatedValues returns a list of related values for the given key name
	// and the existing selection of keys.
	GetRelatedValues(ctx context.Context, fieldValueSelector *FieldValueSelector) ([]string, error)

	// GetAllValues returns a list of all values.
	GetAllValues(ctx context.Context, fieldValueSelector *FieldValueSelector) (*TelemetryFieldValues, error)
}
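A hedged caller-side sketch of the interface. `listKeysSketch` and the zero-valued selector are illustrative; FieldKeySelector's fields and the concrete store implementation are defined elsewhere in this change set:

package telemetrytypes

import (
	"context"
	"fmt"
)

// Sketch only: exercises MetadataStore from the caller's side. `store` would
// be a concrete implementation wired up elsewhere; the zero-valued selector
// stands in for real selector fields, which this diff does not show.
func listKeysSketch(ctx context.Context, store MetadataStore) error {
	keys, err := store.GetKeys(ctx, &FieldKeySelector{})
	if err != nil {
		return err
	}
	for name, candidates := range keys {
		// Multiple entries per name are expected when the same key exists
		// in different contexts or with different data types.
		fmt.Printf("%s: %d candidate key(s)\n", name, len(candidates))
	}
	return nil
}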
5
pkg/types/telemetrytypes/telemetrystmt.go
Normal file
@@ -0,0 +1,5 @@
package telemetrytypes

type ShowCreateTableStatement struct {
	Statement string `json:"statement" ch:"statement"`
}
21
pkg/types/telemetrytypes/virtualfield.go
Normal file
@@ -0,0 +1,21 @@
package telemetrytypes

import (
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/uptrace/bun"
)

type VirtualField struct {
	bun.BaseModel `bun:"table:virtual_field"`

	types.Identifiable
	types.TimeAuditable
	types.UserAuditable

	Name        string      `bun:"name,type:text,notnull" json:"name"`
	Expression  string      `bun:"expression,type:text,notnull" json:"expression"`
	Description string      `bun:"description,type:text" json:"description"`
	Signal      Signal      `bun:"signal,type:text,notnull" json:"signal"`
	OrgID       valuer.UUID `bun:"org_id,type:text,notnull" json:"orgId"`
}
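The bun tags above map VirtualField one-to-one onto the `virtual_field` table. A hedged persistence sketch: `insertVirtualFieldSketch`, the sample name/expression values, and leaving the embedded Identifiable/audit fields unset are all illustrative assumptions, not part of this diff:

package telemetrytypes

import (
	"context"

	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/uptrace/bun"
)

// Sketch only: persists a VirtualField through bun's standard query builder.
// The embedded Identifiable/TimeAuditable/UserAuditable fields are not set
// here; whatever populates them (hooks or callers) lives outside this diff.
func insertVirtualFieldSketch(ctx context.Context, db *bun.DB, orgID valuer.UUID) error {
	vf := &VirtualField{
		Name:       "duration_ms",
		Expression: "durationNano / 1e6",
		Signal:     SignalTraces,
		OrgID:      orgID,
	}
	_, err := db.NewInsert().Model(vf).Exec(ctx)
	return err
}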