Mirror of https://github.com/SigNoz/signoz.git (synced 2026-05-12 21:20:30 +01:00)

Compare commits (6 commits): feat/overv… → test/seede…
Commits:

- 914e87158b
- b98359a785
- a913e67acf
- c86df3adcb
- 1641d5cdbe
- b9a44dbf02
`.github/workflows/build-community.yaml` (vendored): 1 change

````diff
@@ -54,6 +54,7 @@ jobs:
       JS_SRC: frontend
       JS_OUTPUT_ARTIFACT_CACHE_KEY: community-jsbuild-${{ github.sha }}
       JS_OUTPUT_ARTIFACT_PATH: frontend/build
+      JS_PKG_MANAGER: pnpm
       DOCKER_BUILD: false
       DOCKER_MANIFEST: false
   go-build:
````
`.github/workflows/build-enterprise.yaml` (vendored): 1 change

````diff
@@ -87,6 +87,7 @@ jobs:
       JS_INPUT_ARTIFACT_PATH: frontend/.env
       JS_OUTPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
       JS_OUTPUT_ARTIFACT_PATH: frontend/build
+      JS_PKG_MANAGER: pnpm
       DOCKER_BUILD: false
       DOCKER_MANIFEST: false
   go-build:
````
`.github/workflows/build-staging.yaml` (vendored): 1 change

````diff
@@ -86,6 +86,7 @@ jobs:
       JS_INPUT_ARTIFACT_PATH: frontend/.env
       JS_OUTPUT_ARTIFACT_CACHE_KEY: staging-jsbuild-${{ github.sha }}
       JS_OUTPUT_ARTIFACT_PATH: frontend/build
+      JS_PKG_MANAGER: pnpm
       DOCKER_BUILD: false
       DOCKER_MANIFEST: false
   go-build:
````
`.github/workflows/e2eci.yaml` (vendored): 22 changes

````diff
@@ -21,15 +21,19 @@ jobs:
         uses: actions/setup-node@v4
         with:
           node-version: lts/*
+      - name: install-pnpm
+        uses: pnpm/action-setup@v6
+        with:
+          version: 10
      - name: install
        run: |
-          cd tests/e2e && yarn install --frozen-lockfile
+          cd tests/e2e && pnpm install
      - name: fmt
        run: |
-          cd tests/e2e && yarn fmt:check
+          cd tests/e2e && pnpm fmt:check
      - name: lint
        run: |
-          cd tests/e2e && yarn lint
+          cd tests/e2e && pnpm lint
   test:
     strategy:
       fail-fast: false
@@ -54,15 +58,19 @@ jobs:
         uses: actions/setup-node@v4
         with:
           node-version: lts/*
+      - name: install-pnpm
+        uses: pnpm/action-setup@v6
+        with:
+          version: 10
      - name: python-install
        run: |
          cd tests && uv sync
-      - name: yarn-install
+      - name: pnpm-install
        run: |
-          cd tests/e2e && yarn install --frozen-lockfile
+          cd tests/e2e && pnpm install --frozen-lockfile
      - name: playwright-browsers
        run: |
-          cd tests/e2e && yarn playwright install --with-deps ${{ matrix.project }}
+          cd tests/e2e && pnpm playwright install --with-deps ${{ matrix.project }}
      - name: bring-up-stack
        run: |
          cd tests && \
@@ -73,7 +81,7 @@ jobs:
      - name: playwright-test
        run: |
          cd tests/e2e && \
-          yarn playwright test --project=${{ matrix.project }}
+          pnpm playwright test --project=${{ matrix.project }}
      - name: teardown-stack
        if: always()
        run: |
````
`.github/workflows/goci.yaml` (vendored): 4 changes

````diff
@@ -77,6 +77,10 @@ jobs:
         uses: actions/setup-node@v5
         with:
           node-version: "22"
+      - name: setup-pnpm
+        uses: pnpm/action-setup@v6
+        with:
+          version: 10
       - name: docker-community
         shell: bash
         run: |
````
`.github/workflows/gor-signoz-community.yaml` (vendored): 4 changes

````diff
@@ -25,6 +25,10 @@ jobs:
         uses: actions/setup-node@v5
         with:
           node-version: "22"
+      - name: setup-pnpm
+        uses: pnpm/action-setup@v6
+        with:
+          version: 10
       - name: build-frontend
         run: make js-build
       - name: upload-frontend-artifact
````
`.github/workflows/gor-signoz.yaml` (vendored): 4 changes

````diff
@@ -41,6 +41,10 @@ jobs:
         uses: actions/setup-node@v5
         with:
           node-version: "22"
+      - name: setup-pnpm
+        uses: pnpm/action-setup@v6
+        with:
+          version: 10
       - name: build-frontend
         run: make js-build
       - name: upload-frontend-artifact
````
`.github/workflows/jsci.yaml` (vendored): 14 changes

````diff
@@ -22,6 +22,7 @@ jobs:
     with:
       PRIMUS_REF: main
       JS_SRC: frontend
+      JS_PKG_MANAGER: pnpm
   test:
     if: |
       github.event_name == 'merge_group' ||
@@ -32,6 +33,7 @@ jobs:
     with:
       PRIMUS_REF: main
       JS_SRC: frontend
+      JS_PKG_MANAGER: pnpm
   fmt:
     if: |
       github.event_name == 'merge_group' ||
@@ -42,6 +44,7 @@ jobs:
     with:
       PRIMUS_REF: main
       JS_SRC: frontend
+      JS_PKG_MANAGER: pnpm
   lint:
     if: |
       github.event_name == 'merge_group' ||
@@ -52,6 +55,7 @@ jobs:
     with:
       PRIMUS_REF: main
       JS_SRC: frontend
+      JS_PKG_MANAGER: pnpm
   languages:
     if: |
       github.event_name == 'merge_group' ||
@@ -76,9 +80,13 @@ jobs:
         uses: actions/setup-node@v5
         with:
           node-version: "22"
+      - name: install-pnpm
+        uses: pnpm/action-setup@v6
+        with:
+          version: 10
       - name: install-frontend
-        run: cd frontend && yarn install
+        run: cd frontend && pnpm install
       - name: generate-api-clients
         run: |
-          cd frontend && yarn generate:api
-          git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in generated api clients. Run yarn generate:api in frontend/ locally and commit."; exit 1)
+          cd frontend && pnpm generate:api
+          git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in generated api clients. Run pnpm generate:api in frontend/ locally and commit."; exit 1)
````
`.gitpod.yml`: 10 changes

````diff
@@ -1,19 +1,21 @@
 # Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file)
 # and commit this file to your remote git repository to share the goodness with others.

 tasks:
   - name: Run Docker Images
     init: |
       cd ./deploy/docker
       sudo docker compose up -d

+  - name: Install pnpm
+    init: |
+      npm i -g pnpm
+
   - name: Run Frontend
     init: |
       cd ./frontend
-      yarn install
-    command:
-      yarn dev
+      pnpm install
+    command: pnpm dev

 ports:
   - port: 8080
````
`Makefile`: 2 changes

````diff
@@ -154,7 +154,7 @@ $(GO_BUILD_ARCHS_ENTERPRISE_RACE): go-build-enterprise-race-%: $(TARGET_DIR)
 .PHONY: js-build
 js-build: ## Builds the js frontend
 	@echo ">> building js frontend"
-	@cd $(JS_BUILD_CONTEXT) && CI=1 yarn install && yarn build
+	@cd $(JS_BUILD_CONTEXT) && CI=1 pnpm install && pnpm build

 ##############################################################
 # docker commands
````
````diff
@@ -3,8 +3,9 @@ FROM node:22-bookworm AS build
 WORKDIR /opt/
 COPY ./frontend/ ./
 ENV NODE_OPTIONS=--max-old-space-size=8192
-RUN CI=1 yarn install
-RUN CI=1 yarn build
+RUN CI=1 npm i -g pnpm
+RUN CI=1 pnpm install
+RUN CI=1 pnpm build

 FROM golang:1.25-bookworm
````
````diff
@@ -2223,8 +2223,6 @@ components:
         type: boolean
-      org_id:
-        type: string
       source:
         type: string
       updatedAt:
         format: date-time
         type: string
@@ -3015,6 +3013,32 @@ components:
       - end
      - limit
      type: object
+    InframonitoringtypesPostableVolumes:
+      properties:
+        end:
+          format: int64
+          type: integer
+        filter:
+          $ref: '#/components/schemas/Querybuildertypesv5Filter'
+        groupBy:
+          items:
+            $ref: '#/components/schemas/Querybuildertypesv5GroupByKey'
+          nullable: true
+          type: array
+        limit:
+          type: integer
+        offset:
+          type: integer
+        orderBy:
+          $ref: '#/components/schemas/Querybuildertypesv5OrderBy'
+        start:
+          format: int64
+          type: integer
+      required:
+      - start
+      - end
+      - limit
+      type: object
     InframonitoringtypesRequiredMetricsCheck:
       properties:
         missingMetrics:
@@ -3030,6 +3054,67 @@ components:
      - list
      - grouped_list
      type: string
+    InframonitoringtypesVolumeRecord:
+      properties:
+        meta:
+          additionalProperties:
+            type: string
+          nullable: true
+          type: object
+        persistentVolumeClaimName:
+          type: string
+        volumeAvailable:
+          format: double
+          type: number
+        volumeCapacity:
+          format: double
+          type: number
+        volumeInodes:
+          format: double
+          type: number
+        volumeInodesFree:
+          format: double
+          type: number
+        volumeInodesUsed:
+          format: double
+          type: number
+        volumeUsage:
+          format: double
+          type: number
+      required:
+      - persistentVolumeClaimName
+      - volumeAvailable
+      - volumeCapacity
+      - volumeUsage
+      - volumeInodes
+      - volumeInodesFree
+      - volumeInodesUsed
+      - meta
+      type: object
+    InframonitoringtypesVolumes:
+      properties:
+        endTimeBeforeRetention:
+          type: boolean
+        records:
+          items:
+            $ref: '#/components/schemas/InframonitoringtypesVolumeRecord'
+          nullable: true
+          type: array
+        requiredMetricsCheck:
+          $ref: '#/components/schemas/InframonitoringtypesRequiredMetricsCheck'
+        total:
+          type: integer
+        type:
+          $ref: '#/components/schemas/InframonitoringtypesResponseType'
+        warning:
+          $ref: '#/components/schemas/Querybuildertypesv5QueryWarnData'
+      required:
+      - type
+      - records
+      - total
+      - requiredMetricsCheck
+      - endTimeBeforeRetention
+      type: object
     LlmpricingruletypesGettablePricingRules:
       properties:
         items:
@@ -12171,6 +12256,77 @@ paths:
       summary: List Pods for Infra Monitoring
       tags:
      - inframonitoring
+  /api/v2/infra_monitoring/pvcs:
+    post:
+      deprecated: false
+      description: 'Returns a paginated list of Kubernetes persistent volume claims
+        (PVCs) with key volume metrics: available bytes, capacity bytes, usage (capacity
+        - available), inodes, free inodes, and used inodes. Each row also includes
+        metadata attributes (k8s.persistentvolumeclaim.name, k8s.pod.uid, k8s.pod.name,
+        k8s.namespace.name, k8s.node.name, k8s.statefulset.name, k8s.cluster.name).
+        Supports filtering via a filter expression, custom groupBy to aggregate volumes
+        by any attribute, ordering by any of the six metrics (available, capacity,
+        usage, inodes, inodes_free, inodes_used), and pagination via offset/limit.
+        The response type is ''list'' for the default k8s.persistentvolumeclaim.name
+        grouping or ''grouped_list'' for custom groupBy keys; in both modes every
+        row aggregates volumes in the group. Also reports missing required metrics
+        and whether the requested time range falls before the data retention boundary.
+        Numeric metric fields (volumeAvailable, volumeCapacity, volumeUsage, volumeInodes,
+        volumeInodesFree, volumeInodesUsed) return -1 as a sentinel when no data is
+        available for that field.'
+      operationId: ListVolumes
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/InframonitoringtypesPostableVolumes'
+      responses:
+        "200":
+          content:
+            application/json:
+              schema:
+                properties:
+                  data:
+                    $ref: '#/components/schemas/InframonitoringtypesVolumes'
+                  status:
+                    type: string
+                required:
+                - status
+                - data
+                type: object
+          description: OK
+        "400":
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/RenderErrorResponse'
+          description: Bad Request
+        "401":
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/RenderErrorResponse'
+          description: Unauthorized
+        "403":
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/RenderErrorResponse'
+          description: Forbidden
+        "500":
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/RenderErrorResponse'
+          description: Internal Server Error
+      security:
+      - api_key:
+        - VIEWER
+      - tokenizer:
+        - VIEWER
+      summary: List Volumes for Infra Monitoring
+      tags:
+      - inframonitoring
   /api/v2/livez:
     get:
       deprecated: false
````
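To make the new request schema concrete, here is a minimal sketch of a call against the endpoint. The path and field names come from the spec above; the timestamp units (epoch milliseconds) and the plain `fetch` wrapper are illustrative assumptions, not part of the diff.

```ts
// Minimal sketch of a ListVolumes request (assumed units and client).
// Only start, end, and limit are required by the schema.
async function listPvcVolumes(): Promise<void> {
  const body = {
    start: Date.now() - 15 * 60 * 1000, // assumed epoch-millisecond int64
    end: Date.now(),
    limit: 10,
    offset: 0, // optional pagination; filter/groupBy/orderBy may be omitted
  };

  const res = await fetch('/api/v2/infra_monitoring/pvcs', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });

  // On 200 the envelope is { status: string, data: InframonitoringtypesVolumes }.
  const { data } = await res.json();
  console.log(data.total, data.records);
}
```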
````diff
@@ -13,13 +13,12 @@ Before diving in, make sure you have these tools installed:
   - Download from [go.dev/dl](https://go.dev/dl/)
   - Check [go.mod](../../go.mod#L3) for the minimum version

 - **Node** - Powers our frontend
   - Download from [nodejs.org](https://nodejs.org)
   - Check [.nvmrc](../../frontend/.nvmrc) for the version

-- **Yarn** - Our frontend package manager
-  - Follow the [installation guide](https://yarnpkg.com/getting-started/install)
+- **Pnpm** - Our frontend package manager
+  - Follow the [installation guide](https://pnpm.io/installation)

 - **Docker** - For running Clickhouse and Postgres locally
   - Get it from [docs.docker.com/get-docker](https://docs.docker.com/get-docker/)
@@ -95,7 +94,7 @@ This command:

 2. Install dependencies:
    ```bash
-   yarn install
+   pnpm install
    ```

 3. Create a `.env` file in this directory:
@@ -105,10 +104,10 @@ This command:

 4. Start the development server:
    ```bash
-   yarn dev
+   pnpm dev
    ```

-> 💡 **Tip**: `yarn dev` will automatically rebuild when you make changes to the code
+> 💡 **Tip**: `pnpm dev` will automatically rebuild when you make changes to the code

 Now you're all set to start developing! Happy coding! 🎉
````
````diff
@@ -304,7 +304,7 @@ import ec2Url from '@/assets/Logos/ec2.svg';

 1. Add the logo SVG to `src/assets/Logos/` and add a top-level import in the config file (e.g., `import myServiceUrl from '@/assets/Logos/my-service.svg'`)
 2. Add your data source object to the `onboardingConfigWithLinks` array, referencing the imported variable for `imgUrl`
-3. Test the flow locally with `yarn dev`
+3. Test the flow locally with `pnpm dev`
 4. Validation:
    - Navigate to the [onboarding page](http://localhost:3301/get-started-with-signoz-cloud) on your local machine
    - Data source appears in the list
````
````diff
@@ -138,8 +138,8 @@ func (module *module) Delete(ctx context.Context, orgID valuer.UUID, id valuer.U
 		return err
 	}

-	if err := dashboard.CanDelete(); err != nil {
-		return err
+	if dashboard.Locked {
+		return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "dashboard is locked, please unlock the dashboard to delete it")
 	}

 	err = module.store.RunInTx(ctx, func(ctx context.Context) error {
@@ -213,14 +213,6 @@ func (module *module) Update(ctx context.Context, orgID valuer.UUID, id valuer.U
 	return module.pkgDashboardModule.Update(ctx, orgID, id, updatedBy, data, diff)
 }

-func (module *module) ResetSystemDashboard(ctx context.Context, orgID valuer.UUID, source dashboardtypes.Source, updatedBy string) (*dashboardtypes.Dashboard, error) {
-	return module.pkgDashboardModule.ResetSystemDashboard(ctx, orgID, source, updatedBy)
-}
-
-func (module *module) SetDefaultConfig(ctx context.Context, orgID valuer.UUID) error {
-	return module.pkgDashboardModule.SetDefaultConfig(ctx, orgID)
-}
-
 func (module *module) LockUnlock(ctx context.Context, orgID valuer.UUID, id valuer.UUID, updatedBy string, isAdmin bool, lock bool) error {
 	return module.pkgDashboardModule.LockUnlock(ctx, orgID, id, updatedBy, isAdmin, lock)
 }
````
````diff
@@ -1,7 +1,7 @@
 #!/bin/sh
 . "$(dirname "$0")/_/husky.sh"

-cd frontend && yarn run commitlint --edit $1
+cd frontend && pnpm run commitlint --edit $1

 branch="$(git rev-parse --abbrev-ref HEAD)"
````
````diff
@@ -1,4 +1,4 @@
 #!/bin/sh
 . "$(dirname "$0")/_/husky.sh"

-cd frontend && yarn lint-staged
+cd frontend && pnpm lint-staged
````
````diff
@@ -1 +1,4 @@
 registry = 'https://registry.npmjs.org/'
+
+public-hoist-pattern[]=@commitlint*
+public-hoist-pattern[]=commitlint
````
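For context: unlike yarn's flat `node_modules`, pnpm keeps dependencies isolated, so tools that resolve plugins from the root of `node_modules` can break. `public-hoist-pattern` in `.npmrc` tells pnpm to hoist matching packages to the root; here that presumably lets the commitlint CLI invoked by the husky hook resolve the `@commitlint/*` packages installed as devDependencies.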
````diff
@@ -28,8 +28,8 @@ Follow the steps below
 1. ```git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend```
 1. change baseURL to ```<test environment URL>``` in file ```src/constants/env.ts```

-1. ```yarn install```
-1. ```yarn dev```
+1. ```pnpm install```
+1. ```pnpm dev```

 ```Note: Please ping us in #contributing channel in our slack community and we will DM you with <test environment URL>```
````
````diff
@@ -41,7 +41,7 @@ This project was bootstrapped with [Create React App](https://github.com/faceboo

 In the project directory, you can run:

-### `yarn start`
+### `pnpm start`

 Runs the app in the development mode.\
 Open [http://localhost:3301](http://localhost:3301) to view it in the browser.
@@ -49,12 +49,12 @@ Open [http://localhost:3301](http://localhost:3301) to view it in the browser.
 The page will reload if you make edits.\
 You will also see any lint errors in the console.

-### `yarn test`
+### `pnpm test`

 Launches the test runner in the interactive watch mode.\
 See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.

-### `yarn build`
+### `pnpm build`

 Builds the app for production to the `build` folder.\
 It correctly bundles React in production mode and optimizes the build for the best performance.
@@ -64,7 +64,7 @@ Your app is ready to be deployed!

 See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.

-### `yarn eject`
+### `pnpm eject`

 **Note: this is a one-way operation. Once you `eject`, you can’t go back!**
@@ -100,6 +100,6 @@ This section has moved here: [https://facebook.github.io/create-react-app/docs/a

 This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment)

-### `yarn build` fails to minify
+### `pnpm build` fails to minify

 This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify)
````
````diff
@@ -46,7 +46,11 @@ const config: Config.InitialOptions = {
   },
   transformIgnorePatterns: [
-    // @chenglou/pretext is ESM-only; @signozhq/ui pulls it in via text-ellipsis.
-    'node_modules/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@chenglou/pretext|@signozhq/design-tokens|@signozhq/table|@signozhq/calendar|@signozhq/input|@signozhq/popover|@signozhq/*|date-fns|d3-interpolate|d3-color|api|@codemirror|@lezer|@marijn|@grafana|nuqs)/)',
+    // Pattern 1: allow .pnpm virtual store through (handled by pattern 2), plus root-level ESM packages.
+    'node_modules/(?!(\\.pnpm|lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@chenglou/pretext|@signozhq/design-tokens|@signozhq|date-fns|d3-interpolate|d3-color|api|@codemirror|@lezer|@marijn|@grafana|nuqs|uuid)/)',
+    // Pattern 2: pnpm virtual store — ignore everything except ESM-only packages.
+    // pnpm encodes scoped packages as @scope+name@version, so match on scope prefix.
+    'node_modules/\\.pnpm/(?!(lodash-es|react-dnd|core-dnd|@react-dnd|dnd-core|react-dnd-html5-backend|axios|@chenglou|@signozhq|date-fns|d3-interpolate|d3-color|api|@codemirror|@lezer|@marijn|@grafana|nuqs|uuid)[^/]*/node_modules)',
   ],
   setupFilesAfterEnv: ['<rootDir>/jest.setup.ts'],
   testPathIgnorePatterns: ['/node_modules/', '/public/'],
````
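The comment pair above is easier to see with concrete paths: under pnpm, real package sources live in the virtual store at `node_modules/.pnpm/<name>@<version>/node_modules/<name>/…`, so a single root-level lookahead never sees the package name directly after `node_modules/`. A small sketch with simplified versions of the two patterns (package lists trimmed; the paths are illustrative, not from a real install):

```ts
// Jest skips transformation for any file whose path matches a pattern, so the
// negative lookaheads carve out the ESM-only packages that MUST be transformed.
const rootPattern = /node_modules\/(?!(\.pnpm|lodash-es)\/)/;
const pnpmPattern = /node_modules\/\.pnpm\/(?!(lodash-es|@signozhq)[^/]*\/node_modules)/;

const hoisted = 'node_modules/lodash-es/lodash.js';
const store = 'node_modules/.pnpm/lodash-es@4.17.21/node_modules/lodash-es/lodash.js';
const storeCjs = 'node_modules/.pnpm/react@18.2.0/node_modules/react/index.js';

console.log(rootPattern.test(hoisted)); // false -> lodash-es is transformed
console.log(rootPattern.test(store)); // false -> virtual store falls through to pattern 2
console.log(pnpmPattern.test(store)); // false -> ESM-only package is still transformed
console.log(pnpmPattern.test(storeCjs)); // true  -> CJS react stays ignored (not transformed)
```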
````diff
@@ -6,6 +6,7 @@
  * Adds custom matchers from the react testing library to all tests
  */
+import '@testing-library/jest-dom';
 import '@testing-library/jest-dom/extend-expect';
 import 'jest-styled-components';

 import { server } from './src/mocks-server/server';
````
````diff
@@ -80,7 +80,7 @@ export default defineConfig({
       header: (info: { title: string; version: string }): string[] => [
         `! Do not edit manually`,
         `* The file has been auto-generated using Orval for SigNoz`,
-        `* regenerate with 'yarn generate:api'`,
+        `* regenerate with 'pnpm generate:api'`,
         ...(info.title ? [info.title] : []),
         ...(info.version ? [`OpenAPI spec version: ${info.version}`] : []),
       ],
````
````diff
@@ -18,7 +18,7 @@
     "jest": "jest",
     "jest:coverage": "jest --coverage",
     "jest:watch": "jest --watch",
-    "postinstall": "yarn i18n:generate-hash && (is-ci || yarn husky:configure) && node scripts/update-registry.cjs",
+    "postinstall": "pnpm i18n:generate-hash && (is-ci || pnpm husky:configure) && node scripts/update-registry.cjs",
     "husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
     "commitlint": "commitlint --edit $1",
     "test": "jest",
@@ -34,6 +34,8 @@
     "@ant-design/colors": "6.0.0",
+    "@codemirror/autocomplete": "6.18.6",
+    "@codemirror/lang-javascript": "6.2.3",
     "@codemirror/state": "6.5.2",
     "@codemirror/view": "6.36.6",
     "@dnd-kit/core": "6.1.0",
     "@dnd-kit/modifiers": "7.0.0",
     "@dnd-kit/sortable": "8.0.0",
@@ -103,6 +105,7 @@
     "overlayscrollbars-react": "^0.5.6",
     "papaparse": "5.4.1",
+    "posthog-js": "1.298.0",
     "rc-select": "14.10.0",
     "rc-tween-one": "3.0.6",
     "react": "18.2.0",
     "react-addons-update": "15.6.3",
@@ -166,8 +169,8 @@
     "@babel/preset-env": "^7.22.14",
     "@babel/preset-react": "^7.12.13",
     "@babel/preset-typescript": "^7.21.4",
-    "@commitlint/cli": "^20.4.2",
-    "@commitlint/config-conventional": "^20.4.2",
+    "@commitlint/cli": "20.4.4",
+    "@commitlint/config-conventional": "20.4.4",
     "@faker-js/faker": "9.3.0",
     "@jest/globals": "30.2.0",
     "@testing-library/jest-dom": "5.16.5",
@@ -177,8 +180,11 @@
     "@types/crypto-js": "4.2.2",
     "@types/dompurify": "^2.4.0",
     "@types/event-source-polyfill": "^1.0.0",
+    "@types/d3-hierarchy": "1.1.11",
     "@types/fontfaceobserver": "2.1.0",
     "@types/history": "4.7.11",
     "@types/jest": "30.0.0",
+    "@jest/types": "30.2.0",
     "@types/lodash-es": "^4.17.4",
     "@types/mini-css-extract-plugin": "^2.5.1",
     "@types/node": "^16.10.3",
@@ -196,11 +202,13 @@
     "@types/react-syntax-highlighter": "15.5.13",
     "@types/redux-mock-store": "1.0.4",
     "@types/styled-components": "^5.1.4",
     "@types/testing-library__jest-dom": "^5.14.5",
     "@types/uuid": "^8.3.1",
-    "@typescript/native-preview": "7.0.0-dev.20260421.2",
+    "@typescript/native-preview": "7.0.0-dev.20260430.1",
     "autoprefixer": "10.4.19",
     "babel-plugin-styled-components": "^1.12.0",
+    "eslint-plugin-sonarjs": "4.0.2",
+    "glob": "^13.0.6",
     "husky": "^7.0.4",
     "imagemin": "^8.0.1",
     "imagemin-svgo": "^10.0.1",
@@ -211,7 +219,7 @@
     "lint-staged": "^12.5.0",
     "msw": "1.3.2",
     "npm-run-all": "latest",
-    "orval": "7.18.0",
+    "orval": "7.21.0",
     "oxfmt": "0.47.0",
     "oxlint": "1.62.0",
     "oxlint-tsgolint": "0.22.1",
````
`frontend/pnpm-lock.yaml` (generated, new file, 22419 lines): diff suppressed because it is too large.
````diff
@@ -16,7 +16,7 @@ echo "\n✅ Tag files renamed to index.ts"

 # Format generated files
 echo "\n\n---\nRunning prettier...\n"
-if ! yarn prettify src/api/generated; then
+if ! pnpm prettify src/api/generated; then
   echo "Formatting failed!"
   exit 1
 fi
@@ -25,7 +25,7 @@ echo "\n✅ Formatting successful"

 # Fix linting issues
 echo "\n\n---\nRunning lint...\n"
-if ! yarn lint:generated; then
+if ! pnpm lint:generated; then
   echo "Lint check failed! Please fix linting errors before proceeding."
   exit 1
 fi
````
Every generated API client file receives the same header update. The hunk below is repeated verbatim in twelve generated files at this point in the diff; only each file's trailing `react-query` import (a context line) differs:

````diff
@@ -1,7 +1,7 @@
 /**
  * ! Do not edit manually
  * * The file has been auto-generated using Orval for SigNoz
- * * regenerate with 'yarn generate:api'
+ * * regenerate with 'pnpm generate:api'
  * SigNoz
  */
 import { useQuery } from 'react-query';
````
````diff
@@ -1,7 +1,7 @@
 /**
  * ! Do not edit manually
  * * The file has been auto-generated using Orval for SigNoz
- * * regenerate with 'yarn generate:api'
+ * * regenerate with 'pnpm generate:api'
  * SigNoz
  */
 import { useMutation } from 'react-query';
@@ -17,11 +17,13 @@ import type {
   InframonitoringtypesPostableNamespacesDTO,
   InframonitoringtypesPostableNodesDTO,
   InframonitoringtypesPostablePodsDTO,
+  InframonitoringtypesPostableVolumesDTO,
   ListClusters200,
   ListHosts200,
   ListNamespaces200,
   ListNodes200,
   ListPods200,
+  ListVolumes200,
   RenderErrorResponseDTO,
 } from '../sigNoz.schemas';
@@ -448,3 +450,87 @@ export const useListPods = <

   return useMutation(mutationOptions);
 };
+/**
+ * Returns a paginated list of Kubernetes persistent volume claims (PVCs) with key volume metrics: available bytes, capacity bytes, usage (capacity - available), inodes, free inodes, and used inodes. Each row also includes metadata attributes (k8s.persistentvolumeclaim.name, k8s.pod.uid, k8s.pod.name, k8s.namespace.name, k8s.node.name, k8s.statefulset.name, k8s.cluster.name). Supports filtering via a filter expression, custom groupBy to aggregate volumes by any attribute, ordering by any of the six metrics (available, capacity, usage, inodes, inodes_free, inodes_used), and pagination via offset/limit. The response type is 'list' for the default k8s.persistentvolumeclaim.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates volumes in the group. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (volumeAvailable, volumeCapacity, volumeUsage, volumeInodes, volumeInodesFree, volumeInodesUsed) return -1 as a sentinel when no data is available for that field.
+ * @summary List Volumes for Infra Monitoring
+ */
+export const listVolumes = (
+  inframonitoringtypesPostableVolumesDTO: BodyType<InframonitoringtypesPostableVolumesDTO>,
+  signal?: AbortSignal,
+) => {
+  return GeneratedAPIInstance<ListVolumes200>({
+    url: `/api/v2/infra_monitoring/pvcs`,
+    method: 'POST',
+    headers: { 'Content-Type': 'application/json' },
+    data: inframonitoringtypesPostableVolumesDTO,
+    signal,
+  });
+};
+
+export const getListVolumesMutationOptions = <
+  TError = ErrorType<RenderErrorResponseDTO>,
+  TContext = unknown,
+>(options?: {
+  mutation?: UseMutationOptions<
+    Awaited<ReturnType<typeof listVolumes>>,
+    TError,
+    { data: BodyType<InframonitoringtypesPostableVolumesDTO> },
+    TContext
+  >;
+}): UseMutationOptions<
+  Awaited<ReturnType<typeof listVolumes>>,
+  TError,
+  { data: BodyType<InframonitoringtypesPostableVolumesDTO> },
+  TContext
+> => {
+  const mutationKey = ['listVolumes'];
+  const { mutation: mutationOptions } = options
+    ? options.mutation &&
+      'mutationKey' in options.mutation &&
+      options.mutation.mutationKey
+      ? options
+      : { ...options, mutation: { ...options.mutation, mutationKey } }
+    : { mutation: { mutationKey } };
+
+  const mutationFn: MutationFunction<
+    Awaited<ReturnType<typeof listVolumes>>,
+    { data: BodyType<InframonitoringtypesPostableVolumesDTO> }
+  > = (props) => {
+    const { data } = props ?? {};
+
+    return listVolumes(data);
+  };
+
+  return { mutationFn, ...mutationOptions };
+};
+
+export type ListVolumesMutationResult = NonNullable<
+  Awaited<ReturnType<typeof listVolumes>>
+>;
+export type ListVolumesMutationBody =
+  BodyType<InframonitoringtypesPostableVolumesDTO>;
+export type ListVolumesMutationError = ErrorType<RenderErrorResponseDTO>;
+
+/**
+ * @summary List Volumes for Infra Monitoring
+ */
+export const useListVolumes = <
+  TError = ErrorType<RenderErrorResponseDTO>,
+  TContext = unknown,
+>(options?: {
+  mutation?: UseMutationOptions<
+    Awaited<ReturnType<typeof listVolumes>>,
+    TError,
+    { data: BodyType<InframonitoringtypesPostableVolumesDTO> },
+    TContext
+  >;
+}): UseMutationResult<
+  Awaited<ReturnType<typeof listVolumes>>,
+  TError,
+  { data: BodyType<InframonitoringtypesPostableVolumesDTO> },
+  TContext
+> => {
+  const mutationOptions = getListVolumesMutationOptions(options);
+
+  return useMutation(mutationOptions);
+};
````
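Because Orval generates this list endpoint as a react-query mutation, a consumer calls the hook's `mutate` with `{ data: <body> }`. A minimal sketch of a call site; the hook and DTO names come from the generated code above, while the import path and payload values are placeholders:

```ts
// Sketch: consuming the generated mutation-style hook.
// The import path depends on where the generated client lives in the app.
import { useListVolumes } from './infraMonitoring';

function usePvcList() {
  const { mutate: fetchVolumes, data, isLoading } = useListVolumes({
    mutation: {
      onError: (err) => console.error('listVolumes failed', err),
    },
  });

  // Variables match the generated { data: BodyType<...> } shape.
  const load = () =>
    fetchVolumes({
      data: {
        start: Date.now() - 15 * 60 * 1000,
        end: Date.now(),
        limit: 10,
      },
    });

  return { load, data, isLoading };
}
```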
(The same regenerate-hint header hunk repeats in eleven more generated client files here.)
````diff
@@ -1,7 +1,7 @@
 /**
  * ! Do not edit manually
  * * The file has been auto-generated using Orval for SigNoz
- * * regenerate with 'yarn generate:api'
+ * * regenerate with 'pnpm generate:api'
  * SigNoz
  */
 export interface AlertmanagertypesChannelDTO {
@@ -4186,10 +4186,6 @@ export interface DashboardtypesDashboardDTO {
    * @type string
    */
-  org_id?: string;
-  /**
-   * @type string
-   */
   source?: string;
   /**
    * @type string
   * @format date-time
@@ -5089,6 +5085,34 @@ export interface InframonitoringtypesPostablePodsDTO {
   start: number;
 }

+export interface InframonitoringtypesPostableVolumesDTO {
+  /**
+   * @type integer
+   * @format int64
+   */
+  end: number;
+  filter?: Querybuildertypesv5FilterDTO;
+  /**
+   * @type array
+   * @nullable true
+   */
+  groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
+  /**
+   * @type integer
+   */
+  limit: number;
+  /**
+   * @type integer
+   */
+  offset?: number;
+  orderBy?: Querybuildertypesv5OrderByDTO;
+  /**
+   * @type integer
+   * @format int64
+   */
+  start: number;
+}
+
 export interface InframonitoringtypesRequiredMetricsCheckDTO {
   /**
    * @type array
@@ -5101,6 +5125,74 @@ export enum InframonitoringtypesResponseTypeDTO {
   list = 'list',
   grouped_list = 'grouped_list',
 }
+/**
+ * @nullable
+ */
+export type InframonitoringtypesVolumeRecordDTOMeta = {
+  [key: string]: string;
+} | null;
+
+export interface InframonitoringtypesVolumeRecordDTO {
+  /**
+   * @type object
+   * @nullable true
+   */
+  meta: InframonitoringtypesVolumeRecordDTOMeta;
+  /**
+   * @type string
+   */
+  persistentVolumeClaimName: string;
+  /**
+   * @type number
+   * @format double
+   */
+  volumeAvailable: number;
+  /**
+   * @type number
+   * @format double
+   */
+  volumeCapacity: number;
+  /**
+   * @type number
+   * @format double
+   */
+  volumeInodes: number;
+  /**
+   * @type number
+   * @format double
+   */
+  volumeInodesFree: number;
+  /**
+   * @type number
+   * @format double
+   */
+  volumeInodesUsed: number;
+  /**
+   * @type number
+   * @format double
+   */
+  volumeUsage: number;
+}
+
+export interface InframonitoringtypesVolumesDTO {
+  /**
+   * @type boolean
+   */
+  endTimeBeforeRetention: boolean;
+  /**
+   * @type array
+   * @nullable true
+   */
+  records: InframonitoringtypesVolumeRecordDTO[] | null;
+  requiredMetricsCheck: InframonitoringtypesRequiredMetricsCheckDTO;
+  /**
+   * @type integer
+   */
+  total: number;
+  type: InframonitoringtypesResponseTypeDTO;
+  warning?: Querybuildertypesv5QueryWarnDataDTO;
+}
+
 export interface LlmpricingruletypesGettablePricingRulesDTO {
   /**
    * @type array
@@ -9449,6 +9541,14 @@ export type ListPods200 = {
   status: string;
 };

+export type ListVolumes200 = {
+  data: InframonitoringtypesVolumesDTO;
+  /**
+   * @type string
+   */
+  status: string;
+};
+
 export type Livez200 = {
   data: FactoryResponseDTO;
   /**
````
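One consequence of the `-1` sentinel described in the endpoint's docs is that consumers should treat negative metric values as "no data" rather than as real measurements. A small sketch of that guard, using the `InframonitoringtypesVolumeRecordDTO` fields above:

```ts
// Sketch: map the -1 "no data" sentinel to undefined before formatting.
const NO_DATA = -1;

function usableMetric(value: number): number | undefined {
  return value === NO_DATA ? undefined : value;
}

function describeUsage(record: {
  persistentVolumeClaimName: string;
  volumeUsage: number;
  volumeCapacity: number;
}): string {
  const usage = usableMetric(record.volumeUsage);
  const capacity = usableMetric(record.volumeCapacity);
  if (usage === undefined || capacity === undefined || capacity === 0) {
    return `${record.persistentVolumeClaimName}: no data`;
  }
  return `${record.persistentVolumeClaimName}: ${((usage / capacity) * 100).toFixed(1)}% used`;
}
```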
(The same regenerate-hint header hunk repeats in four more generated client files here.)
````diff
@@ -118,6 +118,7 @@ export const hostColumnsConfig: TableColumnDef<HostData>[] = [
         <ValidateColumnValueWrapper
           value={cpu}
           entity={InfraMonitoringEntity.HOSTS}
+          attribute="CPU metric"
         >
           <EntityProgressBar value={cpu} type="cpu" />
         </ValidateColumnValueWrapper>
@@ -145,6 +146,7 @@ export const hostColumnsConfig: TableColumnDef<HostData>[] = [
         <ValidateColumnValueWrapper
           value={memory}
           entity={InfraMonitoringEntity.HOSTS}
+          attribute="memory metric"
         >
           <EntityProgressBar value={memory} type="memory" />
         </ValidateColumnValueWrapper>
@@ -162,8 +164,15 @@ export const hostColumnsConfig: TableColumnDef<HostData>[] = [
     enableSort: true,
     cell: ({ value }): React.ReactNode => {
       const wait = value as number;
+
       return (
-        <TanStackTable.Text>{`${Number((wait * 100).toFixed(1))}%`}</TanStackTable.Text>
+        <ValidateColumnValueWrapper
+          value={wait}
+          entity={InfraMonitoringEntity.HOSTS}
+          attribute="IOWait metric"
+        >
+          <TanStackTable.Text>{`${Number((wait * 100).toFixed(1))}%`}</TanStackTable.Text>
+        </ValidateColumnValueWrapper>
       );
     },
   },
@@ -175,8 +184,18 @@ export const hostColumnsConfig: TableColumnDef<HostData>[] = [
     accessorFn: (row): number => row.load15,
     width: { min: 100, default: 100 },
     enableSort: true,
-    cell: ({ value }): React.ReactNode => (
-      <TanStackTable.Text>{value as number}</TanStackTable.Text>
-    ),
+    cell: ({ value }): React.ReactNode => {
+      const load15 = value as number;
+
+      return (
+        <ValidateColumnValueWrapper
+          value={load15}
+          entity={InfraMonitoringEntity.HOSTS}
+          attribute="load average metric"
+        >
+          <TanStackTable.Text>{load15}</TanStackTable.Text>
+        </ValidateColumnValueWrapper>
+      );
+    },
   },
 ];
````
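The wrapper component itself is outside this diff. For orientation, here is a hedged sketch of the behavior the call sites suggest: it presumably renders a placeholder for sentinel values and uses `entity`/`attribute` only for messaging. The real component in `../components` may differ.

```tsx
// Hypothetical re-implementation for illustration only; not the real component.
import React from 'react';

type Props = {
  value: number;
  entity?: string; // e.g. InfraMonitoringEntity.HOSTS
  attribute?: string; // e.g. "CPU metric"
  children: React.ReactNode;
};

function ValidateColumnValueWrapperSketch({
  value,
  entity,
  attribute,
  children,
}: Props): React.ReactNode {
  // -1 is the API's "no data" sentinel for infra-monitoring metrics.
  if (value < 0) {
    return (
      <span title={`No ${attribute ?? 'data'} reported for this ${entity ?? 'entity'}`}>
        -
      </span>
    );
  }
  return <>{children}</>;
}

export default ValidateColumnValueWrapperSketch;
```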
````diff
@@ -7,6 +7,7 @@ import EntityGroupHeader from '../Base/EntityGroupHeader';
 import K8sGroupCell from '../Base/K8sGroupCell';
 import { formatBytes } from '../commonUtils';
 import { ValidateColumnValueWrapper } from '../components';
+import { InfraMonitoringEntity } from '../constants';
 import { K8sClusterData, K8sClustersListPayload } from './api';
 import { Boxes } from '@signozhq/icons';

@@ -85,7 +86,11 @@ export const k8sClustersColumnsConfig: TableColumnDef<K8sClusterData>[] = [
     cell: ({ value }): React.ReactNode => {
       const cpu = value as number;
       return (
-        <ValidateColumnValueWrapper value={cpu}>
+        <ValidateColumnValueWrapper
+          value={cpu}
+          entity={InfraMonitoringEntity.CLUSTERS}
+          attribute="CPU metric"
+        >
           <TanStackTable.Text>{cpu}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -100,7 +105,11 @@ export const k8sClustersColumnsConfig: TableColumnDef<K8sClusterData>[] = [
     cell: ({ value }): React.ReactNode => {
       const cpuAllocatable = value as number;
       return (
-        <ValidateColumnValueWrapper value={cpuAllocatable}>
+        <ValidateColumnValueWrapper
+          value={cpuAllocatable}
+          entity={InfraMonitoringEntity.CLUSTERS}
+          attribute="CPU allocatable metric"
+        >
           <TanStackTable.Text>{cpuAllocatable}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -115,7 +124,11 @@ export const k8sClustersColumnsConfig: TableColumnDef<K8sClusterData>[] = [
     cell: ({ value }): React.ReactNode => {
       const memory = value as number;
       return (
-        <ValidateColumnValueWrapper value={memory}>
+        <ValidateColumnValueWrapper
+          value={memory}
+          entity={InfraMonitoringEntity.CLUSTERS}
+          attribute="memory metric"
+        >
           <TanStackTable.Text>{formatBytes(memory)}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -130,7 +143,11 @@ export const k8sClustersColumnsConfig: TableColumnDef<K8sClusterData>[] = [
     cell: ({ value }): React.ReactNode => {
       const memoryAllocatable = value as number;
       return (
-        <ValidateColumnValueWrapper value={memoryAllocatable}>
+        <ValidateColumnValueWrapper
+          value={memoryAllocatable}
+          entity={InfraMonitoringEntity.CLUSTERS}
+          attribute="memory allocatable metric"
+        >
           <TanStackTable.Text>{formatBytes(memoryAllocatable)}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
````
````diff
@@ -93,7 +93,11 @@ export const k8sDaemonSetsColumnsConfig: TableColumnDef<K8sDaemonSetsData>[] = [
     cell: ({ value }): React.ReactNode => {
       const availableNodes = value as number;
       return (
-        <ValidateColumnValueWrapper value={availableNodes}>
+        <ValidateColumnValueWrapper
+          value={availableNodes}
+          entity={InfraMonitoringEntity.DAEMONSETS}
+          attribute="available node"
+        >
           <TanStackTable.Text>{availableNodes}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -108,7 +112,11 @@ export const k8sDaemonSetsColumnsConfig: TableColumnDef<K8sDaemonSetsData>[] = [
     cell: ({ value }): React.ReactNode => {
       const desiredNodes = value as number;
       return (
-        <ValidateColumnValueWrapper value={desiredNodes}>
+        <ValidateColumnValueWrapper
+          value={desiredNodes}
+          entity={InfraMonitoringEntity.DAEMONSETS}
+          attribute="desired node"
+        >
           <TanStackTable.Text>{desiredNodes}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -162,7 +170,11 @@ export const k8sDaemonSetsColumnsConfig: TableColumnDef<K8sDaemonSetsData>[] = [
     cell: ({ value }): React.ReactNode => {
       const cpu = value as number;
       return (
-        <ValidateColumnValueWrapper value={cpu}>
+        <ValidateColumnValueWrapper
+          value={cpu}
+          entity={InfraMonitoringEntity.DAEMONSETS}
+          attribute="CPU metric"
+        >
           <TanStackTable.Text>{cpu}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -216,7 +228,11 @@ export const k8sDaemonSetsColumnsConfig: TableColumnDef<K8sDaemonSetsData>[] = [
     cell: ({ value }): React.ReactNode => {
       const memory = value as number;
       return (
-        <ValidateColumnValueWrapper value={memory}>
+        <ValidateColumnValueWrapper
+          value={memory}
+          entity={InfraMonitoringEntity.DAEMONSETS}
+          attribute="memory metric"
+        >
           <TanStackTable.Text>{formatBytes(memory)}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
````
````diff
@@ -92,7 +92,11 @@ export const k8sDeploymentsColumnsConfig: TableColumnDef<K8sDeploymentsData>[] =
     cell: ({ value }): React.ReactNode => {
       const availablePods = value as number;
       return (
-        <ValidateColumnValueWrapper value={availablePods}>
+        <ValidateColumnValueWrapper
+          value={availablePods}
+          entity={InfraMonitoringEntity.DEPLOYMENTS}
+          attribute="available pod"
+        >
           <TanStackTable.Text>{availablePods}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -109,7 +113,11 @@ export const k8sDeploymentsColumnsConfig: TableColumnDef<K8sDeploymentsData>[] =
     cell: ({ value }): React.ReactNode => {
       const desiredPods = value as number;
       return (
-        <ValidateColumnValueWrapper value={desiredPods}>
+        <ValidateColumnValueWrapper
+          value={desiredPods}
+          entity={InfraMonitoringEntity.DEPLOYMENTS}
+          attribute="desired pod"
+        >
           <TanStackTable.Text>{desiredPods}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -165,7 +173,11 @@ export const k8sDeploymentsColumnsConfig: TableColumnDef<K8sDeploymentsData>[] =
     cell: ({ value }): React.ReactNode => {
       const cpu = value as number;
       return (
-        <ValidateColumnValueWrapper value={cpu}>
+        <ValidateColumnValueWrapper
+          value={cpu}
+          entity={InfraMonitoringEntity.DEPLOYMENTS}
+          attribute="CPU metric"
+        >
           <TanStackTable.Text>{cpu}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -221,7 +233,11 @@ export const k8sDeploymentsColumnsConfig: TableColumnDef<K8sDeploymentsData>[] =
     cell: ({ value }): React.ReactNode => {
       const memory = value as number;
       return (
-        <ValidateColumnValueWrapper value={memory}>
+        <ValidateColumnValueWrapper
+          value={memory}
+          entity={InfraMonitoringEntity.DEPLOYMENTS}
+          attribute="memory metric"
+        >
           <TanStackTable.Text>{formatBytes(memory)}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
````
````diff
@@ -89,7 +89,11 @@ export const k8sJobsColumnsConfig: TableColumnDef<K8sJobsData>[] = [
     cell: ({ value }): React.ReactNode => {
       const successfulPods = value as number;
       return (
-        <ValidateColumnValueWrapper value={successfulPods}>
+        <ValidateColumnValueWrapper
+          value={successfulPods}
+          entity={InfraMonitoringEntity.JOBS}
+          attribute="successful pod"
+        >
           <TanStackTable.Text>{successfulPods}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -104,7 +108,11 @@ export const k8sJobsColumnsConfig: TableColumnDef<K8sJobsData>[] = [
     cell: ({ value }): React.ReactNode => {
       const failedPods = value as number;
       return (
-        <ValidateColumnValueWrapper value={failedPods}>
+        <ValidateColumnValueWrapper
+          value={failedPods}
+          entity={InfraMonitoringEntity.JOBS}
+          attribute="failed pod"
+        >
           <TanStackTable.Text>{failedPods}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -119,7 +127,11 @@ export const k8sJobsColumnsConfig: TableColumnDef<K8sJobsData>[] = [
     cell: ({ value }): React.ReactNode => {
       const desiredSuccessfulPods = value as number;
       return (
-        <ValidateColumnValueWrapper value={desiredSuccessfulPods}>
+        <ValidateColumnValueWrapper
+          value={desiredSuccessfulPods}
+          entity={InfraMonitoringEntity.JOBS}
+          attribute="desired successful pod"
+        >
           <TanStackTable.Text>{desiredSuccessfulPods}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -134,7 +146,11 @@ export const k8sJobsColumnsConfig: TableColumnDef<K8sJobsData>[] = [
     cell: ({ value }): React.ReactNode => {
       const activePods = value as number;
       return (
-        <ValidateColumnValueWrapper value={activePods}>
+        <ValidateColumnValueWrapper
+          value={activePods}
+          entity={InfraMonitoringEntity.JOBS}
+          attribute="active pod"
+        >
           <TanStackTable.Text>{activePods}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -187,7 +203,11 @@ export const k8sJobsColumnsConfig: TableColumnDef<K8sJobsData>[] = [
     cell: ({ value }): React.ReactNode => {
       const cpu = value as number;
       return (
-        <ValidateColumnValueWrapper value={cpu}>
+        <ValidateColumnValueWrapper
+          value={cpu}
+          entity={InfraMonitoringEntity.JOBS}
+          attribute="CPU metric"
+        >
           <TanStackTable.Text>{cpu}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -240,7 +260,11 @@ export const k8sJobsColumnsConfig: TableColumnDef<K8sJobsData>[] = [
     cell: ({ value }): React.ReactNode => {
       const memory = value as number;
       return (
-        <ValidateColumnValueWrapper value={memory}>
+        <ValidateColumnValueWrapper
+          value={memory}
+          entity={InfraMonitoringEntity.JOBS}
+          attribute="memory metric"
+        >
           <TanStackTable.Text>{formatBytes(memory)}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
````
````diff
@@ -6,6 +6,7 @@ import EntityGroupHeader from '../Base/EntityGroupHeader';
 import K8sGroupCell from '../Base/K8sGroupCell';
 import { formatBytes } from '../commonUtils';
 import { ValidateColumnValueWrapper } from '../components';
+import { InfraMonitoringEntity } from '../constants';
 import { K8sNamespacesData, K8sNamespacesListPayload } from './api';
 import { FilePenLine } from '@signozhq/icons';

@@ -90,7 +91,11 @@ export const k8sNamespacesColumnsConfig: TableColumnDef<K8sNamespacesData>[] = [
     cell: ({ value }): React.ReactNode => {
       const cpu = value as number;
       return (
-        <ValidateColumnValueWrapper value={cpu}>
+        <ValidateColumnValueWrapper
+          value={cpu}
+          entity={InfraMonitoringEntity.NAMESPACES}
+          attribute="CPU metric"
+        >
           <TanStackTable.Text>{cpu}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -105,7 +110,11 @@ export const k8sNamespacesColumnsConfig: TableColumnDef<K8sNamespacesData>[] = [
     cell: ({ value }): React.ReactNode => {
       const memory = value as number;
       return (
-        <ValidateColumnValueWrapper value={memory}>
+        <ValidateColumnValueWrapper
+          value={memory}
+          entity={InfraMonitoringEntity.NAMESPACES}
+          attribute="memory metric"
+        >
           <TanStackTable.Text>{formatBytes(memory)}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
````
````diff
@@ -7,6 +7,7 @@ import EntityGroupHeader from '../Base/EntityGroupHeader';
 import K8sGroupCell from '../Base/K8sGroupCell';
 import { formatBytes } from '../commonUtils';
 import { ValidateColumnValueWrapper } from '../components';
+import { InfraMonitoringEntity } from '../constants';
 import { K8sNodeData, K8sNodesListPayload } from './api';
 import { Workflow } from '@signozhq/icons';

@@ -91,7 +92,11 @@ export const k8sNodesColumnsConfig: TableColumnDef<K8sNodeData>[] = [
     cell: ({ value }): React.ReactNode => {
       const cpu = value as number;
       return (
-        <ValidateColumnValueWrapper value={cpu}>
+        <ValidateColumnValueWrapper
+          value={cpu}
+          entity={InfraMonitoringEntity.NODES}
+          attribute="CPU metric"
+        >
           <TanStackTable.Text>{cpu}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -106,7 +111,11 @@ export const k8sNodesColumnsConfig: TableColumnDef<K8sNodeData>[] = [
     cell: ({ value }): React.ReactNode => {
       const cpuAllocatable = value as number;
       return (
-        <ValidateColumnValueWrapper value={cpuAllocatable}>
+        <ValidateColumnValueWrapper
+          value={cpuAllocatable}
+          entity={InfraMonitoringEntity.NODES}
+          attribute="CPU allocatable metric"
+        >
           <TanStackTable.Text>{cpuAllocatable}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -121,7 +130,11 @@ export const k8sNodesColumnsConfig: TableColumnDef<K8sNodeData>[] = [
     cell: ({ value }): React.ReactNode => {
       const memory = value as number;
       return (
-        <ValidateColumnValueWrapper value={memory}>
+        <ValidateColumnValueWrapper
+          value={memory}
+          entity={InfraMonitoringEntity.NODES}
+          attribute="memory metric"
+        >
           <TanStackTable.Text>{formatBytes(memory)}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
@@ -136,7 +149,11 @@ export const k8sNodesColumnsConfig: TableColumnDef<K8sNodeData>[] = [
     cell: ({ value }): React.ReactNode => {
       const memoryAllocatable = value as number;
       return (
-        <ValidateColumnValueWrapper value={memoryAllocatable}>
+        <ValidateColumnValueWrapper
+          value={memoryAllocatable}
+          entity={InfraMonitoringEntity.NODES}
+          attribute="memory allocatable metric"
+        >
           <TanStackTable.Text>{formatBytes(memoryAllocatable)}</TanStackTable.Text>
         </ValidateColumnValueWrapper>
       );
````
@@ -112,7 +112,11 @@ export const k8sPodColumnsConfig: TableColumnDef<K8sPodsData>[] = [
cell: ({ value }): React.ReactNode => {
const cpu = value as number;
return (
<ValidateColumnValueWrapper value={cpu}>
<ValidateColumnValueWrapper
value={cpu}
entity={InfraMonitoringEntity.PODS}
attribute="CPU metric"
>
<TanStackTable.Text>{cpu}</TanStackTable.Text>
</ValidateColumnValueWrapper>
);
@@ -165,7 +169,11 @@ export const k8sPodColumnsConfig: TableColumnDef<K8sPodsData>[] = [
cell: ({ value }): React.ReactNode => {
const memory = value as number;
return (
<ValidateColumnValueWrapper value={memory}>
<ValidateColumnValueWrapper
value={memory}
entity={InfraMonitoringEntity.PODS}
attribute="memory metric"
>
<TanStackTable.Text>{formatBytes(memory)}</TanStackTable.Text>
</ValidateColumnValueWrapper>
);

@@ -100,7 +100,11 @@ export const k8sStatefulSetsColumnsConfig: TableColumnDef<K8sStatefulSetsData>[]
cell: ({ value }): React.ReactNode => {
const availablePods = value as number;
return (
<ValidateColumnValueWrapper value={availablePods}>
<ValidateColumnValueWrapper
value={availablePods}
entity={InfraMonitoringEntity.STATEFULSETS}
attribute="available pod"
>
<TanStackTable.Text>{availablePods}</TanStackTable.Text>
</ValidateColumnValueWrapper>
);
@@ -116,7 +120,11 @@ export const k8sStatefulSetsColumnsConfig: TableColumnDef<K8sStatefulSetsData>[]
cell: ({ value }): React.ReactNode => {
const desiredPods = value as number;
return (
<ValidateColumnValueWrapper value={desiredPods}>
<ValidateColumnValueWrapper
value={desiredPods}
entity={InfraMonitoringEntity.STATEFULSETS}
attribute="desired pod"
>
<TanStackTable.Text>{desiredPods}</TanStackTable.Text>
</ValidateColumnValueWrapper>
);
@@ -173,7 +181,11 @@ export const k8sStatefulSetsColumnsConfig: TableColumnDef<K8sStatefulSetsData>[]
cell: ({ value }): React.ReactNode => {
const cpu = value as number;
return (
<ValidateColumnValueWrapper value={cpu}>
<ValidateColumnValueWrapper
value={cpu}
entity={InfraMonitoringEntity.STATEFULSETS}
attribute="CPU metric"
>
<TanStackTable.Text>{cpu}</TanStackTable.Text>
</ValidateColumnValueWrapper>
);
@@ -230,7 +242,11 @@ export const k8sStatefulSetsColumnsConfig: TableColumnDef<K8sStatefulSetsData>[]
cell: ({ value }): React.ReactNode => {
const memory = value as number;
return (
<ValidateColumnValueWrapper value={memory}>
<ValidateColumnValueWrapper
value={memory}
entity={InfraMonitoringEntity.STATEFULSETS}
attribute="memory metric"
>
<TanStackTable.Text>{formatBytes(memory)}</TanStackTable.Text>
</ValidateColumnValueWrapper>
);

@@ -7,6 +7,7 @@ import EntityGroupHeader from '../Base/EntityGroupHeader';
import K8sGroupCell from '../Base/K8sGroupCell';
import { formatBytes } from '../commonUtils';
import { ValidateColumnValueWrapper } from '../components';
import { InfraMonitoringEntity } from '../constants';
import { K8sVolumesData } from './api';
import { HardDrive } from '@signozhq/icons';

@@ -92,7 +93,11 @@ export const k8sVolumesColumnsConfig: TableColumnDef<K8sVolumesData>[] = [
cell: ({ value }): React.ReactNode => {
const capacity = value as number;
return (
<ValidateColumnValueWrapper value={capacity}>
<ValidateColumnValueWrapper
value={capacity}
entity={InfraMonitoringEntity.VOLUMES}
attribute="capacity metric"
>
<TanStackTable.Text>{formatBytes(capacity)}</TanStackTable.Text>
</ValidateColumnValueWrapper>
);
@@ -107,7 +112,11 @@ export const k8sVolumesColumnsConfig: TableColumnDef<K8sVolumesData>[] = [
cell: ({ value }): React.ReactNode => {
const usage = value as number;
return (
<ValidateColumnValueWrapper value={usage}>
<ValidateColumnValueWrapper
value={usage}
entity={InfraMonitoringEntity.VOLUMES}
attribute="utilization metric"
>
<TanStackTable.Text>{formatBytes(usage)}</TanStackTable.Text>
</ValidateColumnValueWrapper>
);
@@ -122,7 +131,11 @@ export const k8sVolumesColumnsConfig: TableColumnDef<K8sVolumesData>[] = [
cell: ({ value }): React.ReactNode => {
const available = value as number;
return (
<ValidateColumnValueWrapper value={available}>
<ValidateColumnValueWrapper
value={available}
entity={InfraMonitoringEntity.VOLUMES}
attribute="available metric"
>
<TanStackTable.Text>{formatBytes(available)}</TanStackTable.Text>
</ValidateColumnValueWrapper>
);

@@ -36,7 +36,7 @@ import { ILog } from 'types/api/logs/log';
import { DataSource, StringOperators } from 'types/common/queryBuilder';

import loadingPlaneUrl from '@/assets/Icons/loading-plane.gif';
import { getAbsoluteUrl } from '@/utils/basePath';
import { getAbsoluteUrl } from 'utils/basePath';

import { LiveLogsListProps } from './types';

@@ -27,14 +27,13 @@ function JSONView({ logData }: JSONViewProps): JSX.Element {
minimap: {
enabled: false,
},
fontWeight: 400,
fontWeight: '400',
// fontFamily: 'SF Mono',
fontFamily: 'Geist Mono',
fontSize: 13,
lineHeight: '18px',
lineHeight: 18,
colorDecorators: true,
scrollBeyondLastLine: false,
decorationsOverviewRuler: false,
scrollbar: {
vertical: 'hidden',
horizontal: 'hidden',

@@ -50,15 +50,14 @@ function Overview({
const options: EditorProps['options'] = {
automaticLayout: true,
readOnly: true,
height: '40vh',
wordWrap: isWrapWord ? 'on' : 'off',
minimap: {
enabled: false,
},
fontWeight: 400,
fontWeight: '400',
fontFamily: 'Geist Mono',
fontSize: 13,
lineHeight: '18px',
lineHeight: 18,
colorDecorators: true,
scrollBeyondLastLine: false,
scrollbar: {

@@ -38,7 +38,7 @@ import APIError from 'types/api/error';
import { ILog } from 'types/api/logs/log';
import { DataSource, StringOperators } from 'types/common/queryBuilder';

import { getAbsoluteUrl } from '@/utils/basePath';
import { getAbsoluteUrl } from 'utils/basePath';

import NoLogs from '../NoLogs/NoLogs';
import { LogsExplorerListProps } from './LogsExplorerList.interfaces';

@@ -17,13 +17,12 @@ const editorOptions: EditorProps['options'] = {
readOnly: true,
wordWrap: 'on',
minimap: { enabled: false },
fontWeight: 400,
fontWeight: '400',
fontFamily: 'SF Mono, Geist Mono, Fira Code, monospace',
fontSize: 12,
lineHeight: '18px',
lineHeight: 18,
colorDecorators: true,
scrollBeyondLastLine: false,
decorationsOverviewRuler: false,
scrollbar: { vertical: 'hidden', horizontal: 'hidden' },
folding: false,
};

@@ -42,11 +42,7 @@
|
||||
"name": "typescript-plugin-css-modules"
|
||||
}
|
||||
],
|
||||
"types": [
|
||||
"vite/client",
|
||||
"node",
|
||||
"jest"
|
||||
]
|
||||
"types": ["vite/client", "node", "jest", "testing-library__jest-dom"]
|
||||
},
|
||||
"exclude": [
|
||||
"node_modules",
|
||||
|
||||
frontend/yarn.lock — 19448 lines changed (diff suppressed because it is too large)

@@ -105,5 +105,24 @@ func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
return err
}

if err := router.Handle("/api/v2/infra_monitoring/pvcs", handler.New(
provider.authzMiddleware.ViewAccess(provider.infraMonitoringHandler.ListVolumes),
handler.OpenAPIDef{
ID: "ListVolumes",
Tags: []string{"inframonitoring"},
Summary: "List Volumes for Infra Monitoring",
Description: "Returns a paginated list of Kubernetes persistent volume claims (PVCs) with key volume metrics: available bytes, capacity bytes, usage (capacity - available), inodes, free inodes, and used inodes. Each row also includes metadata attributes (k8s.persistentvolumeclaim.name, k8s.pod.uid, k8s.pod.name, k8s.namespace.name, k8s.node.name, k8s.statefulset.name, k8s.cluster.name). Supports filtering via a filter expression, custom groupBy to aggregate volumes by any attribute, ordering by any of the six metrics (available, capacity, usage, inodes, inodes_free, inodes_used), and pagination via offset/limit. The response type is 'list' for the default k8s.persistentvolumeclaim.name grouping or 'grouped_list' for custom groupBy keys; in both modes every row aggregates volumes in the group. Also reports missing required metrics and whether the requested time range falls before the data retention boundary. Numeric metric fields (volumeAvailable, volumeCapacity, volumeUsage, volumeInodes, volumeInodesFree, volumeInodesUsed) return -1 as a sentinel when no data is available for that field.",
Request: new(inframonitoringtypes.PostableVolumes),
RequestContentType: "application/json",
Response: new(inframonitoringtypes.Volumes),
ResponseContentType: "application/json",
SuccessStatusCode: http.StatusOK,
ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusUnauthorized},
Deprecated: false,
SecuritySchemes: newSecuritySchemes(types.RoleViewer),
})).Methods(http.MethodPost).GetError(); err != nil {
return err
}

return nil
}

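For reference, a minimal sketch of a request body this new endpoint accepts, assuming the PostableVolumes JSON shape defined later in this diff (pkg/types/inframonitoringtypes/volumes.go); the timestamps and limit are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

// postableVolumes mirrors the JSON tags of inframonitoringtypes.PostableVolumes
// for illustration only; the real type also carries filter, groupBy, and orderBy.
type postableVolumes struct {
	Start  int64 `json:"start"` // unix ms
	End    int64 `json:"end"`
	Offset int   `json:"offset"`
	Limit  int   `json:"limit"`
}

func main() {
	body, err := json.Marshal(postableVolumes{
		Start:  1700000000000,
		End:    1700003600000,
		Offset: 0,
		Limit:  10,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // POST this to /api/v2/infra_monitoring/pvcs
}
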
@@ -42,11 +42,6 @@ type Module interface {

Update(ctx context.Context, orgID valuer.UUID, id valuer.UUID, updatedBy string, data dashboardtypes.UpdatableDashboard, diff int) (*dashboardtypes.Dashboard, error)

// ResetSystemDashboard restores the default values for the system dashboard keyed by source.
ResetSystemDashboard(ctx context.Context, orgID valuer.UUID, source dashboardtypes.Source, updatedBy string) (*dashboardtypes.Dashboard, error)

SetDefaultConfig(ctx context.Context, orgID valuer.UUID) error

LockUnlock(ctx context.Context, orgID valuer.UUID, id valuer.UUID, updatedBy string, isAdmin bool, lock bool) error

Delete(ctx context.Context, orgID valuer.UUID, id valuer.UUID) error

@@ -76,6 +71,4 @@ type Handler interface {
LockUnlock(http.ResponseWriter, *http.Request)

Delete(http.ResponseWriter, *http.Request)

ResetSystemDashboard(http.ResponseWriter, *http.Request)
}

@@ -185,38 +185,6 @@ func (handler *handler) LockUnlock(rw http.ResponseWriter, r *http.Request) {

}

// ResetSystemDashboard resets only system dashboards (source != "") to their default values.
func (handler *handler) ResetSystemDashboard(rw http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()

claims, err := authtypes.ClaimsFromContext(ctx)
if err != nil {
render.Error(rw, err)
return
}

orgID, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(rw, err)
return
}

source := mux.Vars(r)["source"]
if source == "" {
render.Error(rw, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "source path parameter is required"))
return
}

dashboard, err := handler.module.ResetSystemDashboard(ctx, orgID, dashboardtypes.Source(source), claims.Email)
if err != nil {
render.Error(rw, err)
return
}

render.Success(rw, http.StatusOK, dashboard)
}

func (handler *handler) Delete(rw http.ResponseWriter, r *http.Request) {
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()

@@ -38,7 +38,7 @@ func NewModule(store dashboardtypes.Store, settings factory.ProviderSettings, an
}

func (module *module) Create(ctx context.Context, orgID valuer.UUID, createdBy string, creator valuer.UUID, postableDashboard dashboardtypes.PostableDashboard) (*dashboardtypes.Dashboard, error) {
dashboard, err := dashboardtypes.NewDashboard(orgID, createdBy, postableDashboard, "")
dashboard, err := dashboardtypes.NewDashboard(orgID, createdBy, postableDashboard)
if err != nil {
return nil, err
}

@@ -66,15 +66,6 @@ func (module *module) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID
return dashboardtypes.NewDashboardFromStorableDashboard(storableDashboard), nil
}

func (module *module) GetBySource(ctx context.Context, orgID valuer.UUID, source dashboardtypes.Source) (*dashboardtypes.Dashboard, error) {
storableDashboard, err := module.store.GetBySource(ctx, orgID, string(source))
if err != nil {
return nil, err
}

return dashboardtypes.NewDashboardFromStorableDashboard(storableDashboard), nil
}

func (module *module) List(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error) {
storableDashboards, err := module.store.List(ctx, orgID)
if err != nil {

@@ -90,7 +81,8 @@ func (module *module) Update(ctx context.Context, orgID valuer.UUID, id valuer.U
return nil, err
}

if err := dashboard.Update(ctx, updatableDashboard, updatedBy, diff); err != nil {
err = dashboard.Update(ctx, updatableDashboard, updatedBy, diff)
if err != nil {
return nil, err
}

@@ -99,87 +91,14 @@ func (module *module) Update(ctx context.Context, orgID valuer.UUID, id valuer.U
return nil, err
}

if err := module.store.Update(ctx, orgID, storableDashboard); err != nil {
err = module.store.Update(ctx, orgID, storableDashboard)
if err != nil {
return nil, err
}

return dashboard, nil
}

func (module *module) insertSystemDashboard(ctx context.Context, orgID valuer.UUID, source dashboardtypes.Source, createdBy string) (*dashboardtypes.Dashboard, error) {
dashboard, err := dashboardtypes.NewDefaultSystemDashboard(orgID, source)
if err != nil {
return nil, err
}

if createdBy != "" {
dashboard.CreatedBy = createdBy
dashboard.UpdatedBy = createdBy
}

storable, err := dashboardtypes.NewStorableDashboardFromDashboard(dashboard)
if err != nil {
return nil, err
}
if err := module.store.Create(ctx, storable); err != nil {
return nil, err
}

return dashboard, nil
}

func (module *module) ResetSystemDashboard(ctx context.Context, orgID valuer.UUID, source dashboardtypes.Source, updatedBy string) (*dashboardtypes.Dashboard, error) {
existing, err := module.GetBySource(ctx, orgID, source)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return nil, err
}

if existing == nil {
return module.insertSystemDashboard(ctx, orgID, source, updatedBy)
}

defaultDashboard, err := dashboardtypes.NewDefaultSystemDashboard(orgID, source)
if err != nil {
return nil, err
}
if err := existing.Update(ctx, defaultDashboard.Data, updatedBy, 0); err != nil {
return nil, err
}

storable, err := dashboardtypes.NewStorableDashboardFromDashboard(existing)
if err != nil {
return nil, err
}
if err := module.store.Update(ctx, orgID, storable); err != nil {
return nil, err
}

return existing, nil
}

// SetDefaultConfig seeds default values for system dashboards for newly created orgs.
func (module *module) SetDefaultConfig(ctx context.Context, orgID valuer.UUID) error {
for _, source := range dashboardtypes.SystemSources {
existing, err := module.GetBySource(ctx, orgID, source)
if err != nil && !errors.Ast(err, errors.TypeNotFound) {
return err
}
if existing != nil {
continue
}

if _, err := module.insertSystemDashboard(ctx, orgID, source, ""); err != nil {
// No defaults are registered for this source, so skip it; defaults will be populated in the default-overview follow-up PR.
if errors.Ast(err, errors.TypeInvalidInput) {
continue
}
return err
}
}

return nil
}

func (module *module) LockUnlock(ctx context.Context, orgID valuer.UUID, id valuer.UUID, updatedBy string, isAdmin bool, lock bool) error {
dashboard, err := module.Get(ctx, orgID, id)
if err != nil {
@@ -209,11 +128,12 @@ func (module *module) Delete(ctx context.Context, orgID valuer.UUID, id valuer.U
return err
}

if err := dashboard.CanDelete(); err != nil {
return err
if dashboard.Locked {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "dashboard is locked, please unlock the dashboard to delete it")
}

if err := module.store.Delete(ctx, orgID, id); err != nil {
err = module.store.Delete(ctx, orgID, id)
if err != nil {
return err
}


@@ -21,7 +21,7 @@ func NewStore(sqlstore sqlstore.SQLStore) dashboardtypes.Store {
func (store *store) Create(ctx context.Context, storabledashboard *dashboardtypes.StorableDashboard) error {
_, err := store.
sqlstore.
BunDBCtx(ctx).
BunDB().
NewInsert().
Model(storabledashboard).
Exec(ctx)
@@ -63,23 +63,6 @@ func (store *store) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID)
return storableDashboard, nil
}

func (store *store) GetBySource(ctx context.Context, orgID valuer.UUID, source string) (*dashboardtypes.StorableDashboard, error) {
storableDashboard := new(dashboardtypes.StorableDashboard)
err := store.
sqlstore.
BunDBCtx(ctx).
NewSelect().
Model(storableDashboard).
Where("org_id = ?", orgID).
Where("source = ?", source).
Scan(ctx)
if err != nil {
return nil, store.sqlstore.WrapNotFoundErrf(err, errors.CodeNotFound, "system dashboard with source %s doesn't exist", source)
}

return storableDashboard, nil
}

func (store *store) GetPublic(ctx context.Context, dashboardID string) (*dashboardtypes.StorablePublicDashboard, error) {
storable := new(dashboardtypes.StorablePublicDashboard)
err := store.
@@ -141,7 +124,6 @@ func (store *store) List(ctx context.Context, orgID valuer.UUID) ([]*dashboardty
NewSelect().
Model(&storableDashboards).
Where("org_id = ?", orgID).
Where("source = ?", "").
Scan(ctx)
if err != nil {
return nil, err
@@ -171,7 +153,7 @@ func (store *store) ListPublic(ctx context.Context, orgID valuer.UUID) ([]*dashb
func (store *store) Update(ctx context.Context, orgID valuer.UUID, storableDashboard *dashboardtypes.StorableDashboard) error {
_, err := store.
sqlstore.
BunDBCtx(ctx).
BunDB().
NewUpdate().
Model(storableDashboard).
WherePK().

@@ -141,3 +141,27 @@ func (h *handler) ListClusters(rw http.ResponseWriter, req *http.Request) {

render.Success(rw, http.StatusOK, result)
}

func (h *handler) ListVolumes(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
render.Error(rw, err)
return
}

orgID := valuer.MustNewUUID(claims.OrgID)

var parsedReq inframonitoringtypes.PostableVolumes
if err := binding.JSON.BindBody(req.Body, &parsedReq); err != nil {
render.Error(rw, err)
return
}

result, err := h.module.ListVolumes(req.Context(), orgID, &parsedReq)
if err != nil {
render.Error(rw, err)
return
}

render.Success(rw, http.StatusOK, result)
}

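A sketch of exercising this route end to end; the host, port, and authentication are deployment-specific and illustrative here:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Illustrative only: in a real deployment the request must carry a valid
	// session/token that the authz middleware (ViewAccess) accepts.
	body := []byte(`{"start":1700000000000,"end":1700003600000,"offset":0,"limit":10}`)
	req, err := http.NewRequest(http.MethodPost, "http://localhost:8080/api/v2/infra_monitoring/pvcs", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
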
@@ -528,3 +528,86 @@ func (m *module) ListClusters(ctx context.Context, orgID valuer.UUID, req *infra

return resp, nil
}

func (m *module) ListVolumes(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableVolumes) (*inframonitoringtypes.Volumes, error) {
if err := req.Validate(); err != nil {
return nil, err
}

resp := &inframonitoringtypes.Volumes{}

if req.OrderBy == nil {
req.OrderBy = &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: inframonitoringtypes.VolumesOrderByUsage,
},
},
Direction: qbtypes.OrderDirectionDesc,
}
}

if len(req.GroupBy) == 0 {
req.GroupBy = []qbtypes.GroupByKey{pvcNameGroupByKey}
resp.Type = inframonitoringtypes.ResponseTypeList
} else {
resp.Type = inframonitoringtypes.ResponseTypeGroupedList
}

// Bake the volume base filter into req.Filter so all downstream helpers pick it up.
if req.Filter == nil {
req.Filter = &qbtypes.Filter{}
}
req.Filter.Expression = mergeFilterExpressions(volumesBaseFilterExpr, req.Filter.Expression)

missingMetrics, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, volumesTableMetricNamesList)
if err != nil {
return nil, err
}
if len(missingMetrics) > 0 {
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: missingMetrics}
resp.Records = []inframonitoringtypes.VolumeRecord{}
resp.Total = 0
return resp, nil
}
if req.End < int64(minFirstReportedUnixMilli) {
resp.EndTimeBeforeRetention = true
resp.Records = []inframonitoringtypes.VolumeRecord{}
resp.Total = 0
return resp, nil
}
resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: []string{}}

metadataMap, err := m.getVolumesTableMetadata(ctx, req)
if err != nil {
return nil, err
}

resp.Total = len(metadataMap)

pageGroups, err := m.getTopVolumeGroups(ctx, orgID, req, metadataMap)
if err != nil {
return nil, err
}

if len(pageGroups) == 0 {
resp.Records = []inframonitoringtypes.VolumeRecord{}
return resp, nil
}

filterExpr := ""
if req.Filter != nil {
filterExpr = req.Filter.Expression
}

fullQueryReq := buildFullQueryRequest(req.Start, req.End, filterExpr, req.GroupBy, pageGroups, m.newVolumesTableListQuery())
queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
if err != nil {
return nil, err
}

resp.Records = buildVolumeRecords(queryResp, pageGroups, req.GroupBy, metadataMap)
resp.Warning = queryResp.Warning

return resp, nil
}

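mergeFilterExpressions itself is not part of this diff; a plausible self-contained sketch, assuming it simply ANDs two non-empty expressions and passes a lone one through unchanged (the real helper may differ):

package main

import "fmt"

// Hypothetical sketch only: the real mergeFilterExpressions lives elsewhere in
// implinframonitoring. This version returns whichever expression is non-empty,
// and parenthesizes and ANDs them when both are present.
func mergeFilterExpressions(base, extra string) string {
	switch {
	case base == "":
		return extra
	case extra == "":
		return base
	default:
		return "(" + base + ") AND (" + extra + ")"
	}
}

func main() {
	fmt.Println(mergeFilterExpressions("k8s.persistentvolumeclaim.name != ''", "k8s.namespace.name = 'prod'"))
}
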
pkg/modules/inframonitoring/implinframonitoring/volumes.go — new file, 128 lines
@@ -0,0 +1,128 @@
package implinframonitoring

import (
"context"
"slices"

"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/valuer"
)

// buildVolumeRecords assembles the page records. VolumeUsage is taken from the
// formula query F1 = B - A (matches v1 record.VolumeUsage = capacity - available).
// No per-row sub-counts (unlike pods/nodes/clusters).
func buildVolumeRecords(
resp *qbtypes.QueryRangeResponse,
pageGroups []map[string]string,
groupBy []qbtypes.GroupByKey,
metadataMap map[string]map[string]string,
) []inframonitoringtypes.VolumeRecord {
metricsMap := parseFullQueryResponse(resp, groupBy)

records := make([]inframonitoringtypes.VolumeRecord, 0, len(pageGroups))
for _, labels := range pageGroups {
compositeKey := compositeKeyFromLabels(labels, groupBy)
pvcName := labels[persistentVolumeClaimNameAttrKey]

record := inframonitoringtypes.VolumeRecord{ // initialize with default values
PersistentVolumeClaimName: pvcName,
VolumeAvailable: -1,
VolumeCapacity: -1,
VolumeUsage: -1,
VolumeInodes: -1,
VolumeInodesFree: -1,
VolumeInodesUsed: -1,
Meta: map[string]string{},
}

if metrics, ok := metricsMap[compositeKey]; ok {
if v, exists := metrics["A"]; exists {
record.VolumeAvailable = v
}
if v, exists := metrics["B"]; exists {
record.VolumeCapacity = v
}
if v, exists := metrics["F1"]; exists {
record.VolumeUsage = v
}
if v, exists := metrics["C"]; exists {
record.VolumeInodes = v
}
if v, exists := metrics["D"]; exists {
record.VolumeInodesFree = v
}
if v, exists := metrics["E"]; exists {
record.VolumeInodesUsed = v
}
}

if attrs, ok := metadataMap[compositeKey]; ok {
for k, v := range attrs {
record.Meta[k] = v
}
}

records = append(records, record)
}
return records
}

func (m *module) getTopVolumeGroups(
ctx context.Context,
orgID valuer.UUID,
req *inframonitoringtypes.PostableVolumes,
metadataMap map[string]map[string]string,
) ([]map[string]string, error) {
orderByKey := req.OrderBy.Key.Name
queryNamesForOrderBy := orderByToVolumesQueryNames[orderByKey]
rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]

topReq := &qbtypes.QueryRangeRequest{
Start: uint64(req.Start),
End: uint64(req.End),
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
},
}

for _, envelope := range m.newVolumesTableListQuery().CompositeQuery.Queries {
if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
continue
}
copied := envelope
if copied.Type == qbtypes.QueryTypeBuilder {
existingExpr := ""
if f := copied.GetFilter(); f != nil {
existingExpr = f.Expression
}
reqFilterExpr := ""
if req.Filter != nil {
reqFilterExpr = req.Filter.Expression
}
merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
copied.SetFilter(&qbtypes.Filter{Expression: merged})
copied.SetGroupBy(req.GroupBy)
}
topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
}

resp, err := m.querier.QueryRange(ctx, orgID, topReq)
if err != nil {
return nil, err
}

allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
}

func (m *module) getVolumesTableMetadata(ctx context.Context, req *inframonitoringtypes.PostableVolumes) (map[string]map[string]string, error) {
var nonGroupByAttrs []string
for _, key := range volumeAttrKeysForMetadata {
if !isKeyInGroupByAttrs(req.GroupBy, key) {
nonGroupByAttrs = append(nonGroupByAttrs, key)
}
}
return m.getMetadata(ctx, volumesTableMetricNamesList, req.GroupBy, nonGroupByAttrs, req.Filter, req.Start, req.End)
}
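The -1 sentinel convention used above can be seen in isolation in this self-contained sketch; the struct and map shapes are simplified from buildVolumeRecords:

package main

import "fmt"

// Simplified from buildVolumeRecords: fields default to -1 and are only
// overwritten when the query response actually carried a value for that series.
type volumeRecord struct {
	Available float64
	Capacity  float64
	Usage     float64
}

func main() {
	// A=available, B=capacity, F1=B-A; a missing key means "no data" for that field.
	metrics := map[string]float64{"A": 5e9, "B": 8e9, "F1": 3e9}
	rec := volumeRecord{Available: -1, Capacity: -1, Usage: -1}
	if v, ok := metrics["A"]; ok {
		rec.Available = v
	}
	if v, ok := metrics["B"]; ok {
		rec.Capacity = v
	}
	if v, ok := metrics["F1"]; ok {
		rec.Usage = v
	}
	fmt.Printf("%+v\n", rec)
}
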
@@ -0,0 +1,192 @@
package implinframonitoring

import (
"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
"github.com/SigNoz/signoz/pkg/types/metrictypes"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

const (
persistentVolumeClaimNameAttrKey = "k8s.persistentvolumeclaim.name"
volumesBaseFilterExpr = "k8s.persistentvolumeclaim.name != ''"
)

var pvcNameGroupByKey = qbtypes.GroupByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: persistentVolumeClaimNameAttrKey,
FieldContext: telemetrytypes.FieldContextResource,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
}

// volumesTableMetricNamesList drives the existence/retention check.
var volumesTableMetricNamesList = []string{
"k8s.volume.available",
"k8s.volume.capacity",
"k8s.volume.inodes",
"k8s.volume.inodes.free",
"k8s.volume.inodes.used",
}

// Carried forward from v1 volumeAttrsToEnrich
// (pkg/query-service/app/inframetrics/pvcs.go:23-31).
var volumeAttrKeysForMetadata = []string{
"k8s.persistentvolumeclaim.name",
"k8s.pod.uid",
"k8s.pod.name",
"k8s.namespace.name",
"k8s.node.name",
"k8s.statefulset.name",
"k8s.cluster.name",
}

// orderByToVolumesQueryNames maps the orderBy column to the query/formula names
// from newVolumesTableListQuery used for ranking volume groups. For "usage",
// the formula F1 = B - A is the ranking column, with A and B carried as deps
// (mirrors the hosts pattern in orderByToHostsQueryNames).
var orderByToVolumesQueryNames = map[string][]string{
inframonitoringtypes.VolumesOrderByAvailable: {"A"},
inframonitoringtypes.VolumesOrderByCapacity: {"B"},
inframonitoringtypes.VolumesOrderByUsage: {"A", "B", "F1"},
inframonitoringtypes.VolumesOrderByInodes: {"C"},
inframonitoringtypes.VolumesOrderByInodesFree: {"D"},
inframonitoringtypes.VolumesOrderByInodesUsed: {"E"},
}

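getTopVolumeGroups (earlier in this diff) always ranks by the last name in the mapped slice, treating earlier entries as dependencies; a small self-contained illustration of that convention:

package main

import "fmt"

// Mirrors how getTopVolumeGroups picks the ranking query: the final entry in
// the slice is the column to sort by, and earlier entries are the queries it
// depends on (F1 = B - A needs A and B evaluated first).
func main() {
	orderByToQueryNames := map[string][]string{
		"available": {"A"},
		"usage":     {"A", "B", "F1"},
	}
	names := orderByToQueryNames["usage"]
	ranking := names[len(names)-1]
	fmt.Println(ranking) // F1
}
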
// newVolumesTableListQuery builds the composite QB v5 request for the volumes list.
// Five builder queries (A..E) cover the v1 volume metrics; formula F1 = B - A
// derives usage = capacity - available (mirrors v1's F1 in PvcsTableListQuery).
// Every builder query carries a base filter `k8s.persistentvolumeclaim.name != ''`.
// Reason: the kubeletstats receiver emits `k8s.volume.*` metrics for every volume
// mount on a pod (emptyDir, configMap, secret, projected, hostPath, ...), but
// only PVC-backed volumes carry the `k8s.persistentvolumeclaim.name` resource
// attribute (see https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/82f2e8798b42a13f07d733d073147b6eb279e0e1/receiver/kubeletstatsreceiver/internal/kubelet/volume.go#L25).
// Without this filter, non-PVC volumes pollute the result and collapse into a single empty-string
// group under the default groupBy. v1's PvcsTableListQuery applied the same filter.
func (m *module) newVolumesTableListQuery() *qbtypes.QueryRangeRequest {
queries := []qbtypes.QueryEnvelope{
// Query A: k8s.volume.available
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "A",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.volume.available",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToLast,
},
},
Filter: &qbtypes.Filter{
Expression: volumesBaseFilterExpr,
},
GroupBy: []qbtypes.GroupByKey{pvcNameGroupByKey},
Disabled: false,
},
},
// Query B: k8s.volume.capacity
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "B",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.volume.capacity",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToLast,
},
},
Filter: &qbtypes.Filter{
Expression: volumesBaseFilterExpr,
},
GroupBy: []qbtypes.GroupByKey{pvcNameGroupByKey},
Disabled: false,
},
},
// Formula F1: Volume Usage (capacity - available)
{
Type: qbtypes.QueryTypeFormula,
Spec: qbtypes.QueryBuilderFormula{
Name: "F1",
Expression: "B - A",
Legend: "Volume Usage",
Disabled: false,
},
},
// Query C: k8s.volume.inodes
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "C",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.volume.inodes",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToLast,
},
},
Filter: &qbtypes.Filter{
Expression: volumesBaseFilterExpr,
},
GroupBy: []qbtypes.GroupByKey{pvcNameGroupByKey},
Disabled: false,
},
},
// Query D: k8s.volume.inodes.free
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "D",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.volume.inodes.free",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToLast,
},
},
Filter: &qbtypes.Filter{
Expression: volumesBaseFilterExpr,
},
GroupBy: []qbtypes.GroupByKey{pvcNameGroupByKey},
Disabled: false,
},
},
// Query E: k8s.volume.inodes.used
{
Type: qbtypes.QueryTypeBuilder,
Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
Name: "E",
Signal: telemetrytypes.SignalMetrics,
Aggregations: []qbtypes.MetricAggregation{
{
MetricName: "k8s.volume.inodes.used",
TimeAggregation: metrictypes.TimeAggregationAvg,
SpaceAggregation: metrictypes.SpaceAggregationSum,
ReduceTo: qbtypes.ReduceToLast,
},
},
Filter: &qbtypes.Filter{
Expression: volumesBaseFilterExpr,
},
GroupBy: []qbtypes.GroupByKey{pvcNameGroupByKey},
Disabled: false,
},
},
}

return &qbtypes.QueryRangeRequest{
RequestType: qbtypes.RequestTypeScalar,
CompositeQuery: qbtypes.CompositeQuery{
Queries: queries,
},
}
}

@@ -14,6 +14,7 @@ type Handler interface {
ListNodes(http.ResponseWriter, *http.Request)
ListNamespaces(http.ResponseWriter, *http.Request)
ListClusters(http.ResponseWriter, *http.Request)
ListVolumes(http.ResponseWriter, *http.Request)
}

type Module interface {
@@ -22,4 +23,5 @@ type Module interface {
ListNodes(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableNodes) (*inframonitoringtypes.Nodes, error)
ListNamespaces(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableNamespaces) (*inframonitoringtypes.Namespaces, error)
ListClusters(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableClusters) (*inframonitoringtypes.Clusters, error)
ListVolumes(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.PostableVolumes) (*inframonitoringtypes.Volumes, error)
}

@@ -4,7 +4,6 @@ import (
"context"

"github.com/SigNoz/signoz/pkg/alertmanager"
"github.com/SigNoz/signoz/pkg/modules/dashboard"
"github.com/SigNoz/signoz/pkg/modules/organization"
"github.com/SigNoz/signoz/pkg/modules/quickfilter"
"github.com/SigNoz/signoz/pkg/types"
@@ -15,11 +14,10 @@ type setter struct {
store types.OrganizationStore
alertmanager alertmanager.Alertmanager
quickfilter quickfilter.Module
dashboard dashboard.Module
}

func NewSetter(store types.OrganizationStore, alertmanager alertmanager.Alertmanager, quickfilter quickfilter.Module, dashboard dashboard.Module) organization.Setter {
return &setter{store: store, alertmanager: alertmanager, quickfilter: quickfilter, dashboard: dashboard}
func NewSetter(store types.OrganizationStore, alertmanager alertmanager.Alertmanager, quickfilter quickfilter.Module) organization.Setter {
return &setter{store: store, alertmanager: alertmanager, quickfilter: quickfilter}
}

func (module *setter) Create(ctx context.Context, organization *types.Organization, createManagedRoles func(context.Context, valuer.UUID) error) error {
@@ -35,10 +33,6 @@ func (module *setter) Create(ctx context.Context, organization *types.Organizati
return err
}

if err := module.dashboard.SetDefaultConfig(ctx, organization.ID); err != nil {
return err
}

if err := createManagedRoles(ctx, organization.ID); err != nil {
return err
}

@@ -512,7 +512,6 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
router.HandleFunc("/api/v1/dashboards/{id}", am.EditAccess(aH.Signoz.Handlers.Dashboard.Update)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{id}", am.EditAccess(aH.Signoz.Handlers.Dashboard.Delete)).Methods(http.MethodDelete)
router.HandleFunc("/api/v1/dashboards/{id}/lock", am.EditAccess(aH.Signoz.Handlers.Dashboard.LockUnlock)).Methods(http.MethodPut)
router.HandleFunc("/api/v1/dashboards/{source}/reset", am.AdminAccess(aH.Signoz.Handlers.Dashboard.ResetSystemDashboard)).Methods(http.MethodPost)
router.HandleFunc("/api/v2/variables/query", am.ViewAccess(aH.queryDashboardVarsV2)).Methods(http.MethodPost)

router.HandleFunc("/api/v1/explorer/views", am.ViewAccess(aH.Signoz.Handlers.SavedView.List)).Methods(http.MethodGet)

@@ -115,7 +115,7 @@ func NewModules(
fl flagger.Flagger,
) Modules {
quickfilter := implquickfilter.NewModule(implquickfilter.NewStore(sqlstore))
orgSetter := implorganization.NewSetter(implorganization.NewStore(sqlstore), alertmanager, quickfilter, dashboard)
orgSetter := implorganization.NewSetter(implorganization.NewStore(sqlstore), alertmanager, quickfilter)
userSetter := impluser.NewSetter(impluser.NewStore(sqlstore, providerSettings), tokenizer, emailing, providerSettings, orgSetter, authz, analytics, config.User, userRoleStore, userGetter)
ruleStore := sqlrulestore.NewRuleStore(sqlstore, queryParser, providerSettings)

@@ -200,7 +200,6 @@ func NewSQLMigrationProviderFactories(
sqlmigration.NewAddServiceAccountManagedRoleTransactionsFactory(sqlstore),
sqlmigration.NewAddSpanMapperFactory(sqlstore, sqlschema),
sqlmigration.NewAddLLMPricingRulesFactory(sqlstore, sqlschema),
sqlmigration.NewAddSystemDashboardFactory(sqlstore, sqlschema),
)
}

@@ -1,124 +0,0 @@
package sqlmigration

import (
"context"

"github.com/uptrace/bun"
"github.com/uptrace/bun/migrate"

"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlschema"
"github.com/SigNoz/signoz/pkg/sqlstore"
)

type addSystemDashboard struct {
sqlstore sqlstore.SQLStore
sqlschema sqlschema.SQLSchema
}

func NewAddSystemDashboardFactory(sqlstore sqlstore.SQLStore, sqlschema sqlschema.SQLSchema) factory.ProviderFactory[SQLMigration, Config] {
return factory.NewProviderFactory(factory.MustNewName("add_system_dashboard"), func(ctx context.Context, ps factory.ProviderSettings, c Config) (SQLMigration, error) {
return &addSystemDashboard{sqlstore: sqlstore, sqlschema: sqlschema}, nil
})
}

func (migration *addSystemDashboard) Register(migrations *migrate.Migrations) error {
if err := migrations.Register(migration.Up, migration.Down); err != nil {
return err
}

return nil
}

func (migration *addSystemDashboard) Up(ctx context.Context, db *bun.DB) error {
// Disable foreign keys for the duration of the migration;
// the same pattern is used in migration 029_drop_groups.
if err := migration.sqlstore.Dialect().ToggleForeignKeyConstraint(ctx, db, false); err != nil {
return err
}

tx, err := db.BeginTx(ctx, nil)
if err != nil {
return err
}

defer func() {
_ = tx.Rollback()
}()

table, uniqueConstraints, err := migration.sqlschema.GetTable(ctx, "dashboard")
if err != nil {
return err
}

column := &sqlschema.Column{
Name: sqlschema.ColumnName("source"),
DataType: sqlschema.DataTypeText,
Nullable: false,
}

sqls := migration.sqlschema.Operator().AddColumn(table, uniqueConstraints, column, "")
for _, sql := range sqls {
if _, err := tx.ExecContext(ctx, string(sql)); err != nil {
return err
}
}

// This block will be activated once default values for the overview page are added.
// For now, only the source column is added to the dashboard table;
// we do not iterate over orgs.
// var orgIDs []string
// if err := tx.NewSelect().Model((*types.Organization)(nil)).Column("id").Scan(ctx, &orgIDs); err != nil {
// return err
// }
//
// for _, rawOrgID := range orgIDs {
// orgID, err := valuer.NewUUID(rawOrgID)
// if err != nil {
// return err
// }
//
// for _, source := range dashboardtypes.SystemSources {
// count, err := tx.NewSelect().
// Model((*dashboardtypes.StorableDashboard)(nil)).
// Where("org_id = ?", orgID).
// Where("source = ?", string(source)).
// Count(ctx)
// if err != nil {
// return err
// }
// if count > 0 {
// continue
// }
//
// dashboard, err := dashboardtypes.NewDefaultSystemDashboard(orgID, source)
// if err != nil {
// return err
// }
//
// storable, err := dashboardtypes.NewStorableDashboardFromDashboard(dashboard)
// if err != nil {
// return err
// }
//
// if _, err := tx.NewInsert().Model(storable).Exec(ctx); err != nil {
// return err
// }
// }
// }

if err := tx.Commit(); err != nil {
return err
}

// Re-enable foreign keys.
if err := migration.sqlstore.Dialect().ToggleForeignKeyConstraint(ctx, db, true); err != nil {
return err
}

return nil
}

func (migration *addSystemDashboard) Down(context.Context, *bun.DB) error {
return nil
}

@@ -30,7 +30,6 @@ type StorableDashboard struct {
Data StorableDashboardData `bun:"data,type:text,notnull"`
Locked bool `bun:"locked,notnull,default:false"`
OrgID valuer.UUID `bun:"org_id,notnull"`
Source string `bun:"source,type:text,notnull"`
}

type Dashboard struct {
@@ -41,7 +40,6 @@ type Dashboard struct {
Data StorableDashboardData `json:"data"`
Locked bool `json:"locked"`
OrgID valuer.UUID `json:"org_id"`
Source string `json:"source"`
}

type LockUnlockDashboard struct {
@@ -81,11 +79,10 @@ func NewStorableDashboardFromDashboard(dashboard *Dashboard) (*StorableDashboard
OrgID: dashboard.OrgID,
Data: dashboard.Data,
Locked: dashboard.Locked,
Source: dashboard.Source,
}, nil
}

func NewDashboard(orgID valuer.UUID, createdBy string, data StorableDashboardData, source Source) (*Dashboard, error) {
func NewDashboard(orgID valuer.UUID, createdBy string, storableDashboardData StorableDashboardData) (*Dashboard, error) {
currentTime := time.Now()

return &Dashboard{
@@ -99,9 +96,8 @@ func NewDashboard(orgID valuer.UUID, createdBy string, data StorableDashboardDat
UpdatedBy: createdBy,
},
OrgID: orgID,
Data: data,
Data: storableDashboardData,
Locked: false,
Source: string(source),
}, nil
}

@@ -119,7 +115,6 @@ func NewDashboardFromStorableDashboard(storableDashboard *StorableDashboard) *Da
OrgID: storableDashboard.OrgID,
Data: storableDashboard.Data,
Locked: storableDashboard.Locked,
Source: storableDashboard.Source,
}
}

@@ -152,7 +147,6 @@ func NewGettableDashboardFromDashboard(dashboard *Dashboard) (*GettableDashboard
OrgID: dashboard.OrgID,
Data: dashboard.Data,
Locked: dashboard.Locked,
Source: dashboard.Source,
}, nil
}

@@ -284,16 +278,6 @@ func (dashboard *Dashboard) Update(ctx context.Context, updatableDashboard Updat
return nil
}

func (dashboard *Dashboard) CanDelete() error {
if dashboard.Source != "" {
return errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "cannot delete system dashboard with source %s", dashboard.Source)
}
if dashboard.Locked {
return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "dashboard is locked, please unlock the dashboard to delete it")
}
return nil
}

func (dashboard *Dashboard) CanLockUnlock(isAdmin bool, updatedBy string) error {
if dashboard.CreatedBy != updatedBy && !isAdmin {
return errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "you are not authorized to lock/unlock this dashboard")

@@ -66,7 +66,7 @@ func TestCanUpdate_MultipleDeletions_ByDiff(t *testing.T) {
initial := StorableDashboardData{
"widgets": makeTestWidgets("a", "b", "c"),
}
d, err := NewDashboard(orgID, "tester", initial, "")
d, err := NewDashboard(orgID, "tester", initial)
assert.NoError(t, err)

updated := StorableDashboardData{

@@ -1,17 +0,0 @@
package dashboardtypes

import (
"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/valuer"
)

type Source string

var SystemSources = []Source{}

// This will be fixed in the upcoming dashboard default values PR.
// No issue with migration or org creation; if the reset endpoint is called we get a 400
// invalid_input response and no action is taken on dashboard.data.
func NewDefaultSystemDashboard(orgID valuer.UUID, source Source) (*Dashboard, error) {
return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "no defaults registered for system dashboard source %s", source)
}

@@ -13,8 +13,6 @@ type Store interface {

Get(context.Context, valuer.UUID, valuer.UUID) (*StorableDashboard, error)

GetBySource(context.Context, valuer.UUID, string) (*StorableDashboard, error)

GetPublic(context.Context, string) (*StorablePublicDashboard, error)

GetDashboardByOrgsAndPublicID(context.Context, []string, string) (*StorableDashboard, error)

pkg/types/inframonitoringtypes/volumes.go — new file, 102 lines
@@ -0,0 +1,102 @@
package inframonitoringtypes

import (
"encoding/json"
"slices"

"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

type Volumes struct {
Type ResponseType `json:"type" required:"true"`
Records []VolumeRecord `json:"records" required:"true"`
Total int `json:"total" required:"true"`
RequiredMetricsCheck RequiredMetricsCheck `json:"requiredMetricsCheck" required:"true"`
EndTimeBeforeRetention bool `json:"endTimeBeforeRetention" required:"true"`
Warning *qbtypes.QueryWarnData `json:"warning,omitempty"`
}

type VolumeRecord struct {
PersistentVolumeClaimName string `json:"persistentVolumeClaimName" required:"true"`
VolumeAvailable float64 `json:"volumeAvailable" required:"true"`
VolumeCapacity float64 `json:"volumeCapacity" required:"true"`
VolumeUsage float64 `json:"volumeUsage" required:"true"`
VolumeInodes float64 `json:"volumeInodes" required:"true"`
VolumeInodesFree float64 `json:"volumeInodesFree" required:"true"`
VolumeInodesUsed float64 `json:"volumeInodesUsed" required:"true"`
Meta map[string]string `json:"meta" required:"true"`
}

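For orientation, a response body consistent with these struct tags might look as follows; the values are illustrative, -1 marks a field with no data (per the handler description earlier in this diff), and the inner shape of requiredMetricsCheck is an assumption since RequiredMetricsCheck is defined elsewhere:

{
  "type": "list",
  "records": [
    {
      "persistentVolumeClaimName": "data-signoz-zookeeper-0",
      "volumeAvailable": 5368709120,
      "volumeCapacity": 8589934592,
      "volumeUsage": 3221225472,
      "volumeInodes": 524288,
      "volumeInodesFree": 520000,
      "volumeInodesUsed": -1,
      "meta": { "k8s.namespace.name": "platform" }
    }
  ],
  "total": 1,
  "requiredMetricsCheck": { "missingMetrics": [] },
  "endTimeBeforeRetention": false
}
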
// PostableVolumes is the request body for the v2 volumes (PVCs) list API.
type PostableVolumes struct {
Start int64 `json:"start" required:"true"`
End int64 `json:"end" required:"true"`
Filter *qbtypes.Filter `json:"filter"`
GroupBy []qbtypes.GroupByKey `json:"groupBy"`
OrderBy *qbtypes.OrderBy `json:"orderBy"`
Offset int `json:"offset"`
Limit int `json:"limit" required:"true"`
}

// Validate ensures PostableVolumes contains acceptable values.
func (req *PostableVolumes) Validate() error {
if req == nil {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
}

if req.Start <= 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid start time %d: start must be greater than 0",
req.Start,
)
}

if req.End <= 0 {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid end time %d: end must be greater than 0",
req.End,
)
}

if req.Start >= req.End {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"invalid time range: start (%d) must be less than end (%d)",
req.Start,
req.End,
)
}

if req.Limit < 1 || req.Limit > 5000 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be between 1 and 5000")
}

if req.Offset < 0 {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "offset cannot be negative")
}

if req.OrderBy != nil {
if !slices.Contains(VolumesValidOrderByKeys, req.OrderBy.Key.Name) {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by key: %s", req.OrderBy.Key.Name)
}
if req.OrderBy.Direction != qbtypes.OrderDirectionAsc && req.OrderBy.Direction != qbtypes.OrderDirectionDesc {
return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by direction: %s", req.OrderBy.Direction)
}
}

return nil
}

// UnmarshalJSON validates input immediately after decoding.
func (req *PostableVolumes) UnmarshalJSON(data []byte) error {
type raw PostableVolumes
var decoded raw
if err := json.Unmarshal(data, &decoded); err != nil {
return err
}
*req = PostableVolumes(decoded)
return req.Validate()
}

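Because UnmarshalJSON runs Validate immediately after decoding, malformed payloads fail at bind time in the handler; a self-contained sketch of the same validate-on-decode pattern (a simplified stand-in, not the real type):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// Simplified copy of the validate-on-decode pattern used by PostableVolumes.
type postable struct {
	Start int64 `json:"start"`
	End   int64 `json:"end"`
	Limit int   `json:"limit"`
}

func (p *postable) validate() error {
	if p.Limit < 1 || p.Limit > 5000 {
		return errors.New("limit must be between 1 and 5000")
	}
	return nil
}

func (p *postable) UnmarshalJSON(data []byte) error {
	type raw postable // type alias drops the custom UnmarshalJSON and avoids recursion
	var decoded raw
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*p = postable(decoded)
	return p.validate()
}

func main() {
	var p postable
	err := json.Unmarshal([]byte(`{"start":1,"end":2,"limit":0}`), &p)
	fmt.Println(err) // limit must be between 1 and 5000
}
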
pkg/types/inframonitoringtypes/volumes_constants.go — new file, 19 lines
@@ -0,0 +1,19 @@
package inframonitoringtypes

const (
VolumesOrderByAvailable = "available"
VolumesOrderByCapacity = "capacity"
VolumesOrderByUsage = "usage"
VolumesOrderByInodes = "inodes"
VolumesOrderByInodesFree = "inodes_free"
VolumesOrderByInodesUsed = "inodes_used"
)

var VolumesValidOrderByKeys = []string{
VolumesOrderByAvailable,
VolumesOrderByCapacity,
VolumesOrderByUsage,
VolumesOrderByInodes,
VolumesOrderByInodesFree,
VolumesOrderByInodesUsed,
}

pkg/types/inframonitoringtypes/volumes_test.go — new file, 237 lines
@@ -0,0 +1,237 @@
package inframonitoringtypes

import (
	"testing"

	"github.com/SigNoz/signoz/pkg/errors"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/stretchr/testify/require"
)

func TestPostableVolumes_Validate(t *testing.T) {
	tests := []struct {
		name    string
		req     *PostableVolumes
		wantErr bool
	}{
		{
			name: "valid request",
			req: &PostableVolumes{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: false,
		},
		{
			name:    "nil request",
			req:     nil,
			wantErr: true,
		},
		{
			name: "start time zero",
			req: &PostableVolumes{
				Start:  0,
				End:    2000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "start time negative",
			req: &PostableVolumes{
				Start:  -1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "end time zero",
			req: &PostableVolumes{
				Start:  1000,
				End:    0,
				Limit:  100,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "start time greater than end time",
			req: &PostableVolumes{
				Start:  2000,
				End:    1000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "start time equal to end time",
			req: &PostableVolumes{
				Start:  1000,
				End:    1000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "limit zero",
			req: &PostableVolumes{
				Start:  1000,
				End:    2000,
				Limit:  0,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "limit negative",
			req: &PostableVolumes{
				Start:  1000,
				End:    2000,
				Limit:  -10,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "limit exceeds max",
			req: &PostableVolumes{
				Start:  1000,
				End:    2000,
				Limit:  5001,
				Offset: 0,
			},
			wantErr: true,
		},
		{
			name: "offset negative",
			req: &PostableVolumes{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: -5,
			},
			wantErr: true,
		},
		{
			name: "orderBy nil is valid",
			req: &PostableVolumes{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
			},
			wantErr: false,
		},
		{
			name: "orderBy with valid key usage and direction desc",
			req: &PostableVolumes{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: VolumesOrderByUsage,
						},
					},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with valid key available and direction asc",
			req: &PostableVolumes{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: VolumesOrderByAvailable,
						},
					},
					Direction: qbtypes.OrderDirectionAsc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with valid key inodes_free",
			req: &PostableVolumes{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: VolumesOrderByInodesFree,
						},
					},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: false,
		},
		{
			name: "orderBy with invalid key",
			req: &PostableVolumes{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: "unknown",
						},
					},
					Direction: qbtypes.OrderDirectionDesc,
				},
			},
			wantErr: true,
		},
		{
			name: "orderBy with valid key but invalid direction",
			req: &PostableVolumes{
				Start:  1000,
				End:    2000,
				Limit:  100,
				Offset: 0,
				OrderBy: &qbtypes.OrderBy{
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: VolumesOrderByCapacity,
						},
					},
					Direction: qbtypes.OrderDirection{String: valuer.NewString("invalid")},
				},
			},
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := tt.req.Validate()
			if tt.wantErr {
				require.Error(t, err)
				require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
			} else {
				require.NoError(t, err)
			}
		})
	}
}
@@ -27,6 +27,7 @@ pytest_plugins = [
    "fixtures.seeder",
    "fixtures.serviceaccount",
    "fixtures.role",
    "fixtures.seed_golden_dataset",
]
13
tests/e2e/bootstrap/global.setup.ts
Normal file
@@ -0,0 +1,13 @@
import { expect, test as setup } from '@playwright/test';

const seederUrl = process.env.SIGNOZ_E2E_SEEDER_URL ?? '';

setup('refresh golden dataset', async ({ request }) => {
	expect(seederUrl, 'SIGNOZ_E2E_SEEDER_URL not set').not.toBe('');
	const response = await request.post(`${seederUrl}/seed/golden`, {
		timeout: 120_000,
	});
	expect(response.ok()).toBeTruthy();
	// eslint-disable-next-line no-console
	console.log(`[setup] refreshed golden dataset: ${await response.text()}`);
});
14
tests/e2e/bootstrap/global.teardown.ts
Normal file
@@ -0,0 +1,14 @@
import { expect, test as teardown } from '@playwright/test';

const seederUrl = process.env.SIGNOZ_E2E_SEEDER_URL ?? '';

teardown('clear seeded telemetry', async ({ request }) => {
	expect(seederUrl, 'SIGNOZ_E2E_SEEDER_URL not set').not.toBe('');
	for (const signal of ['metrics', 'traces', 'logs'] as const) {
		const response = await request.delete(
			`${seederUrl}/telemetry/${signal}`,
			{ timeout: 60_000 },
		);
		expect(response.ok()).toBeTruthy();
	}
});
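Both bootstrap specs drive the same seeder HTTP surface: POST /seed/golden to refresh the dataset and DELETE /telemetry/{metrics|traces|logs} to truncate it. For orientation, a hedged Go sketch of those two calls; only the endpoint paths and the SIGNOZ_E2E_SEEDER_URL variable come from the specs above, the client code itself is illustrative:

package main

import (
	"log"
	"net/http"
	"os"
	"time"
)

// must fails fast on transport errors or non-2xx responses.
func must(resp *http.Response, err error, what string) {
	if err != nil {
		log.Fatalf("%s: %v", what, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		log.Fatalf("%s: unexpected status %s", what, resp.Status)
	}
}

func main() {
	base := os.Getenv("SIGNOZ_E2E_SEEDER_URL") // same variable the specs read
	client := &http.Client{Timeout: 2 * time.Minute}

	// Refresh the golden dataset, as global.setup.ts does before browser projects run.
	resp, err := client.Post(base+"/seed/golden", "application/json", nil)
	must(resp, err, "POST /seed/golden")

	// Truncate each seeded signal, as global.teardown.ts does after the run.
	for _, signal := range []string{"metrics", "traces", "logs"} {
		req, _ := http.NewRequest(http.MethodDelete, base+"/telemetry/"+signal, nil)
		resp, err := client.Do(req)
		must(resp, err, "DELETE /telemetry/"+signal)
	}
}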
@@ -2,6 +2,7 @@ import os
from pathlib import Path

import pytest
import requests

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
@@ -38,6 +39,13 @@ def test_teardown(
    signoz: types.SigNoz,  # pylint: disable=unused-argument
    create_user_admin: types.Operation,  # pylint: disable=unused-argument
    apply_license: types.Operation,  # pylint: disable=unused-argument
    seeder: types.TestContainerDocker,  # pylint: disable=unused-argument
    seeder: types.TestContainerDocker,
) -> None:
    """Fixture dependencies trigger container teardown via --teardown."""
    """Truncate seeded telemetry; containers come down via fixture
    dependency under `--teardown`."""
    base = seeder.host_configs["8080"].base().rstrip("/")
    for signal in ("metrics", "traces", "logs"):
        try:
            requests.delete(f"{base}/telemetry/{signal}", timeout=30).raise_for_status()
        except Exception as e:  # pylint: disable=broad-exception-caught
            print(f"seeder DELETE /telemetry/{signal} failed: {e}")
@@ -41,7 +41,6 @@
    "typescript": "^5.0.0"
  },
  "engines": {
    "node": ">=18.0.0",
    "yarn": ">=1.22.0"
    "node": ">=18.0.0"
  }
}
@@ -6,7 +6,7 @@ import path from 'path';
// .env.local is written by tests/e2e/bootstrap/setup.py when the pytest
// lifecycle brings the backend up locally; override=true so local-backend
// coordinates win over any stale .env values. Subprocess-injected env
// (e.g. when pytest shells out to `yarn test`) still takes priority —
// (e.g. when pytest shells out to `pnpm test`) still takes priority —
// dotenv doesn't touch vars that are already set in process.env.
dotenv.config({ path: path.resolve(__dirname, '.env') });
dotenv.config({ path: path.resolve(__dirname, '.env.local'), override: true });
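The precedence that comment describes (file values fill gaps but never clobber variables already set in the process, unless override is passed) reduces to a set-if-unset rule. A minimal Go illustration of that rule, not from this diff:

package main

import (
	"fmt"
	"os"
)

// setEnv applies dotenv-style precedence: a file value only lands when the
// variable is absent from the process environment, unless override is true.
func setEnv(key, fileValue string, override bool) {
	if _, exists := os.LookupEnv(key); exists && !override {
		return // subprocess-injected value wins
	}
	os.Setenv(key, fileValue)
}

func main() {
	os.Setenv("BASE_URL", "http://from-subprocess:8080")
	setEnv("BASE_URL", "http://from-dotenv:3301", false) // already set: file loses
	setEnv("API_KEY", "file-key", false)                 // absent: file wins
	fmt.Println(os.Getenv("BASE_URL"), os.Getenv("API_KEY"))
	// Output: http://from-subprocess:8080 file-key
}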
@@ -50,12 +50,36 @@ export default defineConfig({
	viewport: { width: 1280, height: 720 },
},

// Browser projects. No project-level auth — specs opt in via the
// authedPage fixture in tests/e2e/fixtures/auth.ts, which logs a user
// in on first use and caches the resulting storageState per worker.
// `setup` runs `bootstrap/global.setup.ts` once before any browser
// project — refreshes the golden dataset so chart-data assertions
// land inside default panel time windows. Per
// https://playwright.dev/docs/test-global-setup-teardown#option-1-project-dependencies.
projects: [
	{ name: 'chromium', use: devices['Desktop Chrome'] },
	{ name: 'firefox', use: devices['Desktop Firefox'] },
	{ name: 'webkit', use: devices['Desktop Safari'] },
	{
		name: 'setup',
		testDir: './bootstrap',
		testMatch: /global\.setup\.ts/,
		teardown: 'teardown',
	},
	{
		name: 'teardown',
		testDir: './bootstrap',
		testMatch: /global\.teardown\.ts/,
	},
	{
		name: 'chromium',
		use: devices['Desktop Chrome'],
		dependencies: ['setup'],
	},
	{
		name: 'firefox',
		use: devices['Desktop Firefox'],
		dependencies: ['setup'],
	},
	{
		name: 'webkit',
		use: devices['Desktop Safari'],
		dependencies: ['setup'],
	},
],
});
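A usage note on this wiring, per the Playwright project-dependencies docs linked in the comment: running a single project, e.g. `pnpm playwright test --project=chromium`, still pulls in `setup` automatically because Playwright runs declared dependencies first, and the `teardown` project runs once all dependent projects finish because `setup` names it via `teardown: 'teardown'`.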
1138
tests/e2e/pnpm-lock.yaml
generated
Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff