Compare commits: playwright...ns/claude- (355 commits)
Commits in this range (only the SHA1 column survived the export; the Author and Date columns were empty):

08c53fe7e8, c1fac00d2e, 15161c09e8, ee5fbe41eb, f2f3a7b24a, dd0738ac70, 1c4dfc931f, 605d6ba17d, f017b07525, b5901ac174,
caacbc086c, 9d06ccab48, de45292782, 9f38305e5a, e1c8b68cd2, 76ec089a43, 4117c7442b, 4153515767, 6a0ee32616, 1f13b60703,
0865d2edaf, 8629c959f0, 10760e6e1b, 4f45645b32, 1417e22ae4, 3051d442c0, ea15ce4e04, 865a7a5a31, de4ca50a40, 8cabaafc58,
e9d66b8094, 26d3d6b1e4, 36d6debeab, 445b0cace8, 132f10f8a3, 14011bc277, f17a332c23, 5ae7a464e6, 51c3628f6e, 6a69076828,
edd04e2f07, ee734cf78c, 6d137bcdff, 444161671d, 31e9e896ec, 325974292f, 7c1a531d01, 5a45532a72, e9501d2e0f, c306e66bcd,
767a0cc28e, 4f9efcc133, bf2dd612e0, d3f15022a4, a5c021e96c, 8b9fcae0cb, e75abcb108, ffa15f9ec1, 87562f5387, 11a7512a25,
f925a15f30, a3fe2c2589, 2719c9b6a7, b4282be3ac, 15fe3a7163, 30b98582b4, 99b9e27bca, 7e72f501a7, 97d7b81a73, 9c5b50ae3a,
fd13c6dee4, a894b08255, 2c6c034e60, 0a81bf8060, b38ffce7f2, 696d83eeaf, d98851dcf2, a103bb453d, abae0c7f27, 0cc1fb0edb,
f2ef42bd7f, 22158a3cee, e9afbede24, 479cba7dd2, 5449374ad8, 68b9cc2b81, 0f62a04f92, b4706743ba, 1eba57b250, c9cbc8d9ad,
23ba9dacd1, fce1cce02e, f8b3cac191, 09e6342a2a, 2d343cde38, 815c60485d, 42c38d5d17, f214bd1dbf, 7163df599b, a5ba770637,
8f7c0b07a7, b744e228c7, 4853bb1f8c, cf693582a5, a9e47c3779, d04c07a887, c36702714e, 3cb6f6704d, 14ab4c9b79, 9aa0073fef,
e124a6a269, 666bfa7a0f, c547ba28e5, 10f8616d47, 3718f6da59, 4882d7d524, a0b722887d, 0b890154b4, d0ef7b181e, 19c6aead54,
032ac75932, c6f5a19256, a4ce770941, 201d5c24a5, ab8b42fbbe, f99821bc40, 7c051601f2, b9f9c00da5, 49ff86e65a, 2dc6febb38,
4ae268d867, 9d78d67461, 055d0ba90d, 09dc95cfe9, d218cd5733, f6da9adb86, c82f54b548, dba038c6e0, bca761498a, 0e6bd90fdf,
f3256aeac4, c9f1526e33, dba536578b, 15ceb228fa, 6b3c6fc722, edcae53b64, 72fda90ec2, 8acfc3c9f7, 463ae443f9, f72535a15f,
e21e99ce64, d1559a3262, 1ccb9bb4c2, 0c059df327, 8a5539679c, 89b188f73d, bb4d6117ac, 1110864549, 5cb515cade, 41d5f6a00c,
61ec1ef28e, 529a9e7009, b00687b43f, 8771919de6, 497972f23c, a9e30919d1, 925c4c4a3d, e66bfe5961, 42943f72b7, 7a72a209e5,
44f00943a8, 8867e1ef38, c08e520941, 139cc4452d, 2f3baeb302, 3d42b0058e, ed70e3c5f5, 7d6918f8b6, 2885bc851e, 857258f8c3,
ece5c2b7ad, 1078f98388, b4e2326f38, c79b154215, a59c0188cc, 3df426625a, 646f359f33, 81167c6947, bc1295b93a, 3db0e1f66a,
d52b54aeb3, c8608c18ae, cde99ba1a0, a7e9d442b7, 0b0d622f6b, 127e760b00, 63e333de0d, af57d11b6a, 8d61ee338b, 5d9dc17645,
5288022ffd, cdc18af4a2, 918a90e3c1, e8ce7b22f5, b752fdd30a, d73b7fadab, bc4b65dbb9, e716a2a7b1, 891c56b059, d01e6fc891,
17f8c1040f, ffa5a9725e, 92cab8e049, 7b9e6e3cbb, 4837ddb601, 9c818955af, 134a051196, c904ab5d99, d53f9a7e16, 1b01b61026,
95a26cecba, 15af828005, e5b99703ac, f0941c7b2e, 12c9b921a7, 52228bc6c4, 79988b448f, 4bfd7ba3d7, 3349158213, 1c9f4efb9f,
fd839ff1db, 09cbe4aa0d, 096e38ee91, 48590c03e2, 38af897bcc, 2b79678e63, a4f54baf1f, 4e6c42dd17, 39bd169b89, c7c2d2a7ef,
0cfb809605, 6a378ed7b4, 8e41847523, 779df62093, 3763794531, e9fa68e1f3, 7bd3e1c453, a48455b2b3, fbb66f14ba, 54b67d9cfd,
1a193015a7, 245179cbf7, dbb6b333c8, 56f8e53d88, 2f4e371dac, db75ec56bc, 02755a6527, 9f089e0784, fb9a7ad3cd, ad631d70b6,
c44efeab33, e9743fa7ac, b7ece08d3e, e5f4f5cc72, 4437630127, 89639b239e, 785ae9f0bd, 8752022cef, c7e4a9c45d, bf92c92204,
bd63633be7, 1158e1199b, 0a60c49314, c25e3beb81, c9e0f2b9ca, 6d831849c1, 83eeb46f99, 287558dc9d, 83aad793c2, 3eff689c85,
f5bcd65e2e, e7772d93af, bbf987ebd7, 105c3a3b8c, c1a4a5b8db, c9591f4341, fd216fdee1, f5bf4293a1, 155a44a25d, 4b21c9d5f9,
5ef0a18867, c8266d1aec, adfd16ce1b, 6db74a5585, f8e0db0085, 01e0b36d62, e90bb016f7, bdecbfb7f5, 3dced2b082, 1285666087,
1655397eaa, 718360a966, 2f5995b071, a061c9de0f, 7b1ca9a1a6, 0d1131e99f, 44d1d0f994, bdce97a727, 5f8cfbe474, 55c2f98768,
624bb5cc62, 95f8fa1566, fa97e63912, c8419c1f82, e05ede3978, 437d0d1345, 64e379c413, d05d394f57, b4e5085a5a, 88f7502a15,
b0442761ac, d539ca9bab, c8194e9abb, c919102fee, 765370752c, db5c102f14, 8a0c5bc3c8, a28ccffd01, 84adb3e163, 6d416061f2,
b8cf0a3041, 1793026c78, c122bc09b4, d22039b1a1, 457b6970b1, 1267f9ad6e, ecd9498970, bac8f8b211, 800a34e625, 934c09b36b,
c62d41edf0, 264af06ca0, dcc902fb27, a4f24a231b, 416e8d2a5e, 43a6c7dcd6, 5005cae2ad, 3123447005, 6c59b5405e, d26b57b0d8,
da17375f10, a96489d06e, 8c29debb52, 9cebd49a2c, a22ef64bb0
`.claude/CLAUDE.md` (new file, 136 lines):

````markdown
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Project Overview

SigNoz is an open-source observability platform (APM, logs, metrics, traces) built on OpenTelemetry and ClickHouse. It provides a unified solution for monitoring applications with features including distributed tracing, log management, metrics dashboards, and alerting.

## Build and Development Commands

### Development Environment Setup

```bash
make devenv-up                      # Start ClickHouse and OTel Collector for local dev
make devenv-clickhouse              # Start only ClickHouse
make devenv-signoz-otel-collector   # Start only OTel Collector
make devenv-clickhouse-clean        # Clean ClickHouse data
```

### Backend (Go)

```bash
make go-run-community           # Run community backend server
make go-run-enterprise          # Run enterprise backend server
make go-test                    # Run all Go unit tests
go test -race ./pkg/...         # Run tests for specific package
go test -race ./pkg/querier/... # Example: run querier tests
```

### Integration Tests (Python)

```bash
cd tests/integration
uv sync                  # Install dependencies
make py-test-setup       # Start test environment (keep running with --reuse)
make py-test             # Run all integration tests
make py-test-teardown    # Stop test environment

# Run specific test
uv run pytest --basetemp=./tmp/ -vv --reuse src/<suite>/<file>.py::test_name
```

### Code Quality

```bash
# Go linting (golangci-lint)
golangci-lint run

# Python formatting/linting
make py-fmt    # Format with black
make py-lint   # Run isort, autoflake, pylint
```

### OpenAPI Generation

```bash
go run cmd/enterprise/*.go generate openapi
```

## Architecture Overview

### Backend Structure

The Go backend follows a **provider pattern** for dependency injection:

- **`pkg/signoz/`** - IoC container that wires all providers together
- **`pkg/modules/`** - Business logic modules (user, organization, dashboard, etc.)
- **`pkg/<provider>/`** - Provider implementations following consistent structure:
  - `<name>.go` - Interface definition
  - `config.go` - Configuration (implements `factory.Config`)
  - `<implname><name>/provider.go` - Implementation
  - `<name>test/` - Mock implementations for testing

### Key Packages

- **`pkg/querier/`** - Query engine for telemetry data (logs, traces, metrics)
- **`pkg/telemetrystore/`** - ClickHouse telemetry storage interface
- **`pkg/sqlstore/`** - Relational database (SQLite/PostgreSQL) for metadata
- **`pkg/apiserver/`** - HTTP API server with OpenAPI integration
- **`pkg/alertmanager/`** - Alert management
- **`pkg/authn/`, `pkg/authz/`** - Authentication and authorization
- **`pkg/flagger/`** - Feature flags (OpenFeature-based)
- **`pkg/errors/`** - Structured error handling

### Enterprise vs Community

- **`cmd/community/`** - Community edition entry point
- **`cmd/enterprise/`** - Enterprise edition entry point
- **`ee/`** - Enterprise-only features

## Code Conventions

### Error Handling

Use the custom `pkg/errors` package instead of standard library:

```go
errors.New(typ, code, message)           // Instead of errors.New()
errors.Newf(typ, code, message, args...) // Instead of fmt.Errorf()
errors.Wrapf(err, typ, code, msg)        // Wrap with context
```

Define domain-specific error codes:

```go
var CodeThingNotFound = errors.MustNewCode("thing_not_found")
```

### HTTP Handlers

Handlers are thin adapters in modules that:

1. Extract auth context from request
2. Decode request body using `binding` package
3. Call module functions
4. Return responses using `render` package

Register routes in `pkg/apiserver/signozapiserver/` with `handler.New()` and `OpenAPIDef`.

### SQL/Database

- Use Bun ORM via `sqlstore.BunDBCtx(ctx)`
- Star schema with `organizations` as central entity
- All tables have `id`, `created_at`, `updated_at`, `org_id` columns
- Write idempotent migrations in `pkg/sqlmigration/`
- No `ON CASCADE` deletes - handle in application logic

### REST Endpoints

- Use plural resource names: `/v1/organizations`, `/v1/users`
- Use `me` for current user/org: `/v1/organizations/me/users`
- Follow RESTful conventions for CRUD operations

### Linting Rules (from .golangci.yml)

- Don't use `errors` package - use `pkg/errors`
- Don't use `zap` logger - use `slog`
- Don't use `fmt.Errorf` or `fmt.Print*`

## Testing

### Unit Tests

- Run with race detector: `go test -race ./...`
- Provider mocks are in `<provider>test/` packages

### Integration Tests

- Located in `tests/integration/`
- Use pytest with testcontainers
- Files prefixed with numbers for execution order (e.g., `01_database.py`)
- Always use `--reuse` flag during development
- Fixtures in `tests/integration/fixtures/`
````
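The four-step handler shape described in CLAUDE.md above can be made concrete with a short sketch. This is an illustration only, not code from the repository: it stands in the standard library for the repository's `binding` and `render` packages, and the `Module` interface and request payload are hypothetical.

```go
// Illustrative sketch of the handler convention: auth, decode, delegate,
// render. Stdlib stand-ins are used where the repository has its own
// binding and render packages; Module and the payload are hypothetical.
package implthing

import (
	"encoding/json"
	"net/http"
)

// Module is the business-logic interface the handler delegates to (step 3).
type Module interface {
	Create(name string) (id string, err error)
}

type Handler struct {
	module Module
}

func (h *Handler) Create(rw http.ResponseWriter, req *http.Request) {
	// 1. Extract auth context from the request; the real code reads claims
	//    from the request context, a bearer header stands in for that here.
	if req.Header.Get("Authorization") == "" {
		http.Error(rw, "unauthorized", http.StatusUnauthorized)
		return
	}

	// 2. Decode the request body (the repository uses its binding package).
	var body struct {
		Name string `json:"name"`
	}
	if err := json.NewDecoder(req.Body).Decode(&body); err != nil {
		http.Error(rw, err.Error(), http.StatusBadRequest)
		return
	}

	// 3. Call the module function; the handler itself holds no business logic.
	id, err := h.module.Create(body.Name)
	if err != nil {
		http.Error(rw, err.Error(), http.StatusInternalServerError)
		return
	}

	// 4. Render the response (the repository uses its render package).
	rw.Header().Set("Content-Type", "application/json")
	rw.WriteHeader(http.StatusCreated)
	_ = json.NewEncoder(rw).Encode(map[string]string{"id": id})
}
```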
`.claude/skills/commit/SKILL.md` (new file, 36 lines):

````markdown
---
name: commit
description: Create a conventional commit with staged changes
disable-model-invocation: true
allowed-tools: Bash(git:*)
---

# Create Conventional Commit

Commit staged changes using conventional commit format: `type(scope): description`

## Types

- `feat:` - New feature
- `fix:` - Bug fix
- `chore:` - Maintenance/refactor/tooling
- `test:` - Tests only
- `docs:` - Documentation

## Process

1. Review staged changes: `git diff --cached`
2. Determine type, optional scope, and description (imperative, <70 chars)
3. Commit using HEREDOC:

```bash
git commit -m "$(cat <<'EOF'
type(scope): description
EOF
)"
```

4. Verify: `git log -1`

## Notes

- Description: imperative mood, lowercase, no period
- Body: explain WHY, not WHAT (code shows what)
````
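A concrete run of this skill's process might look like the following; the type, scope, and message are made-up examples, not commits from this range.

```bash
git diff --cached                 # step 1: review staged changes
git commit -m "$(cat <<'EOF'
fix(querier): handle empty time range in list queries
EOF
)"                                # step 3: HEREDOC keeps the message verbatim
git log -1                        # step 4: verify the commit
```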
`.claude/skills/raise-pr/SKILL.md` (new file, 55 lines):

````markdown
---
name: raise-pr
description: Create a pull request with auto-filled template. Pass 'commit' to commit staged changes first.
disable-model-invocation: true
allowed-tools: Bash(gh:*, git:*), Read
argument-hint: [commit?]
---

# Raise Pull Request

Create a PR with an auto-filled template from commits after origin/main.

## Arguments

- No argument: Create PR with existing commits
- `commit`: Commit staged changes first, then create PR

## Process

1. **If `$ARGUMENTS` is "commit"**: Review staged changes and commit with a descriptive message
   - Check for staged changes: `git diff --cached --stat`
   - If changes exist:
     - Review the changes: `git diff --cached`
     - Create a short and clear commit message based on the changes
     - Commit command: `git commit -m "message"`

2. **Analyze commits since origin/main**:
   - `git log origin/main..HEAD --pretty=format:"%s%n%b"` - get commit messages
   - `git diff origin/main...HEAD --stat` - see changes

3. **Read template**: `.github/pull_request_template.md`

4. **Generate PR**:
   - **Title**: Short (<70 chars), from commit messages or the main change
   - **Body**: Fill template sections based on commits/changes:
     - Summary (why/what/approach) - end with "Closes #<issue_number>" if an issue number is available from the branch name (git branch --show-current)
     - Change Type checkboxes
     - Bug Context (if applicable)
     - Testing Strategy
     - Risk Assessment
     - Changelog (if user-facing)
     - Checklist

5. **Create PR**:

```bash
git push -u origin $(git branch --show-current)
gh pr create --base main --title "..." --body "..."
gh pr view
```

## Notes

- Analyze ALL commit messages from origin/main to HEAD
- Fill template sections based on code analysis
- Leave PR template sections as they are if you cannot determine their content
````
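Step 4 derives the `Closes #<issue_number>` line from the branch name. A minimal sketch of that extraction, assuming the branch embeds the issue number (that naming convention is an assumption, not something the skill specifies):

```bash
branch=$(git branch --show-current)                    # e.g. "fix/8400-jwt-secret"
issue=$(printf '%s' "$branch" | grep -oE '[0-9]+' | head -n1)
[ -n "$issue" ] && echo "Closes #$issue"               # appended to the PR summary
```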
(file path missing from this export; a Docker Compose definition of the schema-migrator services)

```
@@ -42,7 +42,7 @@ services:
       timeout: 5s
       retries: 3
   schema-migrator-sync:
-    image: signoz/signoz-schema-migrator:v0.129.6
+    image: signoz/signoz-schema-migrator:v0.129.12
     container_name: schema-migrator-sync
     command:
       - sync
@@ -55,7 +55,7 @@ services:
         condition: service_healthy
     restart: on-failure
   schema-migrator-async:
-    image: signoz/signoz-schema-migrator:v0.129.6
+    image: signoz/signoz-schema-migrator:v0.129.12
     container_name: schema-migrator-async
     command:
       - async
```
`.github/CODEOWNERS` (142 lines changed; the export flattened removed and added lines together, so diff markers are not recoverable here):

```
@@ -1,87 +1,133 @@
# CODEOWNERS info: https://help.github.com/en/articles/about-code-owners

# Owners are automatically requested for review for PRs that changes code

# that they own.

/frontend/ @SigNoz/frontend @YounixM
/frontend/src/container/MetricsApplication @srikanthccv
/frontend/src/container/NewWidget/RightContainer/types.ts @srikanthccv
/frontend/ @SigNoz/frontend-maintainers

# Dashboard, Alert, Metrics, Service Map, Services
/frontend/src/container/ListOfDashboard/ @srikanthccv
/frontend/src/container/NewDashboard/ @srikanthccv
/frontend/src/pages/DashboardsListPage/ @srikanthccv
/frontend/src/pages/DashboardWidget/ @srikanthccv
/frontend/src/pages/NewDashboard/ @srikanthccv
/frontend/src/providers/Dashboard/ @srikanthccv
# Onboarding

# Alerts
/frontend/src/container/AlertHistory/ @srikanthccv
/frontend/src/container/AllAlertChannels/ @srikanthccv
/frontend/src/container/AnomalyAlertEvaluationView/ @srikanthccv
/frontend/src/container/CreateAlertChannels/ @srikanthccv
/frontend/src/container/CreateAlertRule/ @srikanthccv
/frontend/src/container/EditAlertChannels/ @srikanthccv
/frontend/src/container/FormAlertChannels/ @srikanthccv
/frontend/src/container/FormAlertRules/ @srikanthccv
/frontend/src/container/ListAlertRules/ @srikanthccv
/frontend/src/container/TriggeredAlerts/ @srikanthccv
/frontend/src/pages/AlertChannelCreate/ @srikanthccv
/frontend/src/pages/AlertDetails/ @srikanthccv
/frontend/src/pages/AlertHistory/ @srikanthccv
/frontend/src/pages/AlertList/ @srikanthccv
/frontend/src/pages/CreateAlert/ @srikanthccv
/frontend/src/providers/Alert.tsx @srikanthccv

# Metrics
/frontend/src/container/MetricsExplorer/ @srikanthccv
/frontend/src/pages/MetricsApplication/ @srikanthccv
/frontend/src/pages/MetricsExplorer/ @srikanthccv

# Services and Service Map
/frontend/src/container/ServiceApplication/ @srikanthccv
/frontend/src/container/ServiceTable/ @srikanthccv
/frontend/src/pages/Services/ @srikanthccv
/frontend/src/pages/ServiceTopLevelOperations/ @srikanthccv
/frontend/src/container/Home/Services/ @srikanthccv
/frontend/src/container/OnboardingV2Container/onboarding-configs/onboarding-config-with-links.json @makeavish
/frontend/src/container/OnboardingV2Container/AddDataSource/AddDataSource.tsx @makeavish

/deploy/ @SigNoz/devops
.github @SigNoz/devops

# Scaffold Owners
/pkg/config/ @grandwizard28
/pkg/errors/ @grandwizard28
/pkg/factory/ @grandwizard28
/pkg/types/ @grandwizard28
/pkg/valuer/ @grandwizard28
/cmd/ @grandwizard28
.golangci.yml @grandwizard28

/pkg/config/ @vikrantgupta25
/pkg/errors/ @vikrantgupta25
/pkg/factory/ @vikrantgupta25
/pkg/types/ @vikrantgupta25
/pkg/valuer/ @vikrantgupta25
/cmd/ @vikrantgupta25
.golangci.yml @vikrantgupta25

# Zeus Owners

/pkg/zeus/ @vikrantgupta25
/ee/zeus/ @vikrantgupta25
/pkg/licensing/ @vikrantgupta25
/ee/licensing/ @vikrantgupta25

# SQL Owners

/pkg/sqlmigration/ @vikrantgupta25
/ee/sqlmigration/ @vikrantgupta25
/pkg/sqlschema/ @vikrantgupta25
/ee/sqlschema/ @vikrantgupta25

# Analytics Owners

/pkg/analytics/ @vikrantgupta25
/pkg/statsreporter/ @vikrantgupta25

# Querier Owners

/pkg/querier/ @srikanthccv
/pkg/variables/ @srikanthccv
/pkg/types/querybuildertypes/ @srikanthccv
/pkg/types/telemetrytypes/ @srikanthccv
/pkg/querybuilder/ @srikanthccv
/pkg/telemetrylogs/ @srikanthccv
/pkg/telemetrymetadata/ @srikanthccv
/pkg/telemetrymetrics/ @srikanthccv
/pkg/telemetrytraces/ @srikanthccv

# AuthN / AuthZ Owners
# Metrics

/pkg/authz/ @vikrantgupta25 @grandwizard28
/pkg/types/metrictypes/ @srikanthccv
/pkg/types/metricsexplorertypes/ @srikanthccv
/pkg/modules/metricsexplorer/ @srikanthccv
/pkg/prometheus/ @srikanthccv

# APM

/pkg/types/servicetypes/ @srikanthccv
/pkg/types/apdextypes/ @srikanthccv
/pkg/modules/apdex/ @srikanthccv
/pkg/modules/services/ @srikanthccv

# Dashboard

/pkg/types/dashboardtypes/ @srikanthccv
/pkg/modules/dashboard/ @srikanthccv

# Rule/Alertmanager

/pkg/types/ruletypes/ @srikanthccv
/pkg/types/alertmanagertypes @srikanthccv
/pkg/alertmanager/ @srikanthccv
/pkg/ruler/ @srikanthccv

# Correlation-adjacent

/pkg/contextlinks/ @srikanthccv
/pkg/types/parsertypes/ @srikanthccv
/pkg/queryparser/ @srikanthccv

# AuthN / AuthZ Owners

/pkg/authz/ @vikrantgupta25
/ee/authz/ @vikrantgupta25
/pkg/authn/ @vikrantgupta25
/ee/authn/ @vikrantgupta25
/pkg/modules/user/ @vikrantgupta25
/pkg/modules/session/ @vikrantgupta25
/pkg/modules/organization/ @vikrantgupta25
/pkg/modules/authdomain/ @vikrantgupta25
/pkg/modules/role/ @vikrantgupta25

# Integration tests

/tests/integration/ @vikrantgupta25

# OpenAPI types generator

/frontend/src/api @SigNoz/frontend-maintainers

# Dashboard Owners

/frontend/src/hooks/dashboard/ @SigNoz/pulse-frontend

## Dashboard Types

/frontend/src/api/types/dashboard/ @SigNoz/pulse-frontend

## Dashboard List

/frontend/src/pages/DashboardsListPage/ @SigNoz/pulse-frontend
/frontend/src/container/ListOfDashboard/ @SigNoz/pulse-frontend

## Dashboard Page

/frontend/src/pages/DashboardPage/ @SigNoz/pulse-frontend
/frontend/src/container/DashboardContainer/ @SigNoz/pulse-frontend
/frontend/src/container/GridCardLayout/ @SigNoz/pulse-frontend
/frontend/src/container/NewWidget/ @SigNoz/pulse-frontend

## Public Dashboard Page

/frontend/src/pages/PublicDashboard/ @SigNoz/pulse-frontend
/frontend/src/container/PublicDashboardContainer/ @SigNoz/pulse-frontend
```
`.github/pull_request_template.md` (89 lines changed; old and new template sections were flattened together by the export, so diff markers are not recoverable here):

```
@@ -1,72 +1,85 @@
## 📄 Summary

<!-- Describe the purpose of the PR in a few sentences. What does it fix/add/update? -->
## Pull Request

---

## ✅ Changes
### 📄 Summary
> Why does this change exist?
> What problem does it solve, and why is this the right approach?

- [ ] Feature: Brief description
- [ ] Bug fix: Brief description

#### Screenshots / Screen Recordings (if applicable)
> Include screenshots or screen recordings that clearly show the behavior before the change and the result after the change. This helps reviewers quickly understand the impact and verify the update.

#### Issues closed by this PR
> Reference issues using `Closes #issue-number` to enable automatic closure on merge.

---

## 🏷️ Required: Add Relevant Labels
### ✅ Change Type
_Select all that apply_

> ⚠️ **Manually add appropriate labels in the PR sidebar**
Please select one or more labels (as applicable):

ex:

- `frontend`
- `backend`
- `devops`
- `bug`
- `enhancement`
- `ui`
- `test`
- [ ] ✨ Feature
- [ ] 🐛 Bug fix
- [ ] ♻️ Refactor
- [ ] 🛠️ Infra / Tooling
- [ ] 🧪 Test-only

---

## 👥 Reviewers
### 🐛 Bug Context
> Required if this PR fixes a bug

> Tag the relevant teams for review:
#### Root Cause
> What caused the issue?
> Regression, faulty assumption, edge case, refactor, etc.

- frontend / backend / devops
#### Fix Strategy
> How does this PR address the root cause?

---

## 🧪 How to Test
### 🧪 Testing Strategy
> How was this change validated?

<!-- Describe how reviewers can test this PR -->
1. ...
2. ...
3. ...
- Tests added/updated:
- Manual verification:
- Edge cases covered:

---

## 🔍 Related Issues
### ⚠️ Risk & Impact Assessment
> What could break? How do we recover?

<!-- Reference any related issues (e.g. Fixes #123, Closes #456) -->
Closes #
- Blast radius:
- Potential regressions:
- Rollback plan:

---

## 📸 Screenshots / Screen Recording (if applicable / mandatory for UI related changes)
### 📝 Changelog
> Fill only if this affects users, APIs, UI, or documented behavior
> Use **N/A** for internal or non-user-facing changes

<!-- Add screenshots or GIFs to help visualize changes -->
| Field | Value |
|------|-------|
| Deployment Type | Cloud / OSS / Enterprise |
| Change Type | Feature / Bug Fix / Maintenance |
| Description | User-facing summary |

---

## 📋 Checklist

- [ ] Dev Review
- [ ] Test cases added (Unit/ Integration / E2E)
- [ ] Manually tested the changes

### 📋 Checklist
- [ ] Tests added or explicitly not required
- [ ] Manually tested
- [ ] Breaking changes documented
- [ ] Backward compatibility considered

---

## 👀 Notes for Reviewers

<!-- Anything reviewers should keep in mind while reviewing -->

---
```
`.github/workflows/build-community.yaml` (7 lines changed):

```
@@ -3,8 +3,8 @@ name: build-community
 on:
   push:
     tags:
-      - 'v[0-9]+.[0-9]+.[0-9]+'
-      - 'v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+'
+      - "v[0-9]+.[0-9]+.[0-9]+"
+      - "v[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"

 defaults:
   run:
@@ -69,14 +69,13 @@ jobs:
       GO_BUILD_CONTEXT: ./cmd/community
       GO_BUILD_FLAGS: >-
         -tags timetzdata
-        -ldflags='-linkmode external -extldflags \"-static\" -s -w
+        -ldflags='-s -w
         -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
         -X github.com/SigNoz/signoz/pkg/version.variant=community
         -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
         -X github.com/SigNoz/signoz/pkg/version.time=${{ needs.prepare.outputs.time }}
         -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
         -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
       GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
       DOCKER_DOCKERFILE_PATH: ./cmd/community/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
```
`.github/workflows/build-enterprise.yaml` (7 lines changed):

```
@@ -69,6 +69,7 @@ jobs:
           echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> frontend/.env
           echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> frontend/.env
           echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> frontend/.env
+          echo 'PYLON_IDENTITY_SECRET="${{ secrets.PYLON_IDENTITY_SECRET }}"' >> frontend/.env
       - name: cache-dotenv
         uses: actions/cache@v4
         with:
@@ -84,7 +85,7 @@ jobs:
       JS_INPUT_ARTIFACT_CACHE_KEY: enterprise-dotenv-${{ github.sha }}
       JS_INPUT_ARTIFACT_PATH: frontend/.env
       JS_OUTPUT_ARTIFACT_CACHE_KEY: enterprise-jsbuild-${{ github.sha }}
-      JS_OUTPUT_ARTIFACT_PATH: frontend/build
+      JS_OUTPUT_ARTIFACT_PATH: frontend/build
       DOCKER_BUILD: false
       DOCKER_MANIFEST: false
   go-build:
@@ -99,7 +100,7 @@ jobs:
       GO_BUILD_CONTEXT: ./cmd/enterprise
       GO_BUILD_FLAGS: >-
         -tags timetzdata
-        -ldflags='-linkmode external -extldflags \"-static\" -s -w
+        -ldflags='-s -w
         -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
         -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
         -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
@@ -107,10 +108,8 @@ jobs:
         -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
         -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
         -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
-        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
-        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
         -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
       GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
       DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
```
`.github/workflows/build-staging.yaml` (7 lines changed):

```
@@ -68,6 +68,7 @@ jobs:
           echo 'TUNNEL_DOMAIN="${{ secrets.NP_TUNNEL_DOMAIN }}"' >> frontend/.env
           echo 'PYLON_APP_ID="${{ secrets.NP_PYLON_APP_ID }}"' >> frontend/.env
           echo 'APPCUES_APP_ID="${{ secrets.NP_APPCUES_APP_ID }}"' >> frontend/.env
+          echo 'PYLON_IDENTITY_SECRET="${{ secrets.NP_PYLON_IDENTITY_SECRET }}"' >> frontend/.env
       - name: cache-dotenv
         uses: actions/cache@v4
         with:
@@ -98,7 +99,7 @@ jobs:
       GO_BUILD_CONTEXT: ./cmd/enterprise
       GO_BUILD_FLAGS: >-
         -tags timetzdata
-        -ldflags='-linkmode external -extldflags \"-static\" -s -w
+        -ldflags='-s -w
         -X github.com/SigNoz/signoz/pkg/version.version=${{ needs.prepare.outputs.version }}
         -X github.com/SigNoz/signoz/pkg/version.variant=enterprise
         -X github.com/SigNoz/signoz/pkg/version.hash=${{ needs.prepare.outputs.hash }}
@@ -106,10 +107,8 @@ jobs:
         -X github.com/SigNoz/signoz/pkg/version.branch=${{ needs.prepare.outputs.branch }}
         -X github.com/SigNoz/signoz/ee/zeus.url=https://api.staging.signoz.cloud
         -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.staging.signoz.cloud
-        -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.staging.signoz.cloud
-        -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.staging.signoz.cloud/api/v1
         -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr'
       GO_CGO_ENABLED: 1
       DOCKER_BASE_IMAGES: '{"alpine": "alpine:3.20.3"}'
       DOCKER_DOCKERFILE_PATH: ./cmd/enterprise/Dockerfile.multi-arch
       DOCKER_MANIFEST: true
@@ -125,4 +124,4 @@ jobs:
       GITHUB_SILENT: true
       GITHUB_REPOSITORY_NAME: charts-saas-v3-staging
       GITHUB_EVENT_NAME: releaser
-      GITHUB_EVENT_PAYLOAD: "{\"deployment\": \"${{ needs.prepare.outputs.deployment }}\", \"signoz_version\": \"${{ needs.prepare.outputs.version }}\"}"
+      GITHUB_EVENT_PAYLOAD: '{"deployment": "${{ needs.prepare.outputs.deployment }}", "signoz_version": "${{ needs.prepare.outputs.version }}"}'
```
`.github/workflows/commitci.yaml` (7 lines changed):

```
@@ -25,3 +25,10 @@ jobs:
           else
             echo "No references to 'ee' packages found in 'pkg' directory"
           fi
+
+          if grep -R --include="*.go" '.*/ee/.*' cmd/community/; then
+            echo "Error: Found references to 'ee' packages in 'cmd/community' directory"
+            exit 1
+          else
+            echo "No references to 'ee' packages found in 'cmd/community' directory"
+          fi
```
`.github/workflows/goci.yaml` (20 lines changed):

```
@@ -65,6 +65,10 @@ jobs:
           set -ex
           sudo apt-get update
           sudo apt-get install -y gcc-aarch64-linux-gnu musl-tools
+      - name: node-install
+        uses: actions/setup-node@v5
+        with:
+          node-version: "22"
       - name: docker-community
         shell: bash
         run: |
@@ -73,3 +77,19 @@ jobs:
         shell: bash
         run: |
           make docker-build-enterprise
+  openapi:
+    if: |
+      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
+      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
+    runs-on: ubuntu-latest
+    steps:
+      - name: self-checkout
+        uses: actions/checkout@v4
+      - name: go-install
+        uses: actions/setup-go@v5
+        with:
+          go-version: "1.24"
+      - name: generate-openapi
+        run: |
+          go run cmd/enterprise/*.go generate openapi
+          git diff --compact-summary --exit-code || (echo; echo "Unexpected difference in openapi spec. Run go run cmd/enterprise/*.go generate openapi locally and commit."; exit 1)
```
`.github/workflows/gor-signoz.yaml` (1 line changed):

```
@@ -35,6 +35,7 @@ jobs:
           echo 'POSTHOG_KEY="${{ secrets.POSTHOG_KEY }}"' >> .env
           echo 'PYLON_APP_ID="${{ secrets.PYLON_APP_ID }}"' >> .env
           echo 'APPCUES_APP_ID="${{ secrets.APPCUES_APP_ID }}"' >> .env
+          echo 'PYLON_IDENTITY_SECRET="${{ secrets.PYLON_IDENTITY_SECRET }}"' >> .env
       - name: build-frontend
         run: make js-build
       - name: upload-frontend-artifact
```
`.github/workflows/integrationci.yaml` (61 lines changed; the matrix entries in the first hunk mix old and new values and are shown unmarked):

```
@@ -9,21 +9,51 @@ on:
       - labeled

 jobs:
+  fmtlint:
+    if: |
+      ((github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
+      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))) && contains(github.event.pull_request.labels.*.name, 'safe-to-integrate')
+    runs-on: ubuntu-latest
+    steps:
+      - name: checkout
+        uses: actions/checkout@v4
+      - name: python
+        uses: actions/setup-python@v5
+        with:
+          python-version: 3.13
+      - name: uv
+        uses: astral-sh/setup-uv@v4
+      - name: install
+        run: |
+          cd tests/integration && uv sync
+      - name: fmt
+        run: |
+          make py-fmt
+      - name: lint
+        run: |
+          make py-lint
   test:
     strategy:
       fail-fast: false
       matrix:
         src:
           - bootstrap
           - auth
           - passwordauthn
           - callbackauthn
           - cloudintegrations
           - dashboard
           - querier
           - ttl
           - preference
           - logspipelines
         sqlstore-provider:
           - postgres
           - sqlite
         clickhouse-version:
           - 25.5.6
           - 25.10.1
         schema-migrator-version:
           - v0.129.6
           - v0.129.7
         postgres-version:
           - 15
     if: |
@@ -37,15 +67,32 @@ jobs:
         uses: actions/setup-python@v5
         with:
          python-version: 3.13
-      - name: poetry
+      - name: uv
+        uses: astral-sh/setup-uv@v4
       - name: install
         run: |
-          python -m pip install poetry==2.1.2
-          python -m poetry config virtualenvs.in-project true
-          cd tests/integration && poetry install --no-root
+          cd tests/integration && uv sync
+      - name: webdriver
+        run: |
+          wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
+          echo "deb http://dl.google.com/linux/chrome/deb/ stable main" | sudo tee -a /etc/apt/sources.list.d/google-chrome.list
+          sudo apt-get update -qqy
+          sudo apt-get -qqy install google-chrome-stable
+          CHROME_VERSION=$(google-chrome-stable --version)
+          CHROME_FULL_VERSION=${CHROME_VERSION%%.*}
+          CHROME_MAJOR_VERSION=${CHROME_FULL_VERSION//[!0-9]}
+          sudo rm /etc/apt/sources.list.d/google-chrome.list
+          export CHROMEDRIVER_VERSION=`curl -s https://googlechromelabs.github.io/chrome-for-testing/LATEST_RELEASE_${CHROME_MAJOR_VERSION%%.*}`
+          curl -L -O "https://storage.googleapis.com/chrome-for-testing-public/${CHROMEDRIVER_VERSION}/linux64/chromedriver-linux64.zip"
+          unzip chromedriver-linux64.zip
+          chmod +x chromedriver-linux64/chromedriver
+          sudo mv chromedriver-linux64/chromedriver /usr/local/bin/chromedriver
+          chromedriver -version
+          google-chrome-stable --version
       - name: run
         run: |
           cd tests/integration && \
-          poetry run pytest \
+          uv run pytest \
             --basetemp=./tmp/ \
             src/${{matrix.src}} \
             --sqlstore-provider ${{matrix.sqlstore-provider}} \
```
`.github/workflows/jsci.yaml` (13 lines changed; the export flattened this hunk, so diff markers are not recoverable):

```
@@ -17,10 +17,23 @@ jobs:
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: setup node
        uses: actions/setup-node@v5
        with:
          node-version: "22"
      - name: install
        run: cd frontend && yarn install
      - name: tsc
        run: cd frontend && yarn tsc
  tsc2:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    uses: signoz/primus.workflows/.github/workflows/js-tsc.yaml@main
    secrets: inherit
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
  test:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
```
`.gitignore` (9 lines changed):

```
@@ -1,6 +1,9 @@

 node_modules

 .vscode
+!.vscode/settings.json
+
+deploy/docker/environment_tiny/common_test
 frontend/node_modules
 frontend/.pnp
@@ -12,7 +15,6 @@ frontend/coverage

 # production
 frontend/build
-frontend/.vscode
 frontend/.yarnclean
 frontend/.temp_cache
 frontend/test-results
@@ -31,7 +33,6 @@ frontend/src/constants/env.ts

 .idea

-**/.vscode
 **/build
 **/storage
 **/locust-scripts/__pycache__/
@@ -49,6 +50,7 @@ ee/query-service/tests/test-deploy/data/
 # local data
 *.backup
 *.db
+**/db
 /deploy/docker/clickhouse-setup/data/
 /deploy/docker-swarm/clickhouse-setup/data/
 bin/
@@ -105,7 +107,6 @@ dist/
 downloads/
 eggs/
 .eggs/
 lib/
 lib64/
 parts/
 sdist/
@@ -221,8 +222,6 @@ cython_debug/
 #.idea/

 ### Python Patch ###
-# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
-poetry.toml

 # ruff
 .ruff_cache/
```
(file path missing from this export; golangci-lint configuration. The old and new configs were flattened together by the export, so diff markers are not recoverable; duplicated keys below reflect both versions.)

```
@@ -1,39 +1,64 @@
version: "2"
linters:
  default: standard
  default: none
  enable:
    - bodyclose
    - depguard
    - errcheck
    - forbidigo
    - govet
    - iface
    - ineffassign
    - misspell
    - nilnil
    - sloglint
    - depguard
    - iface
    - wastedassign
    - unparam
    - forbidigo

linters-settings:
  sloglint:
    no-mixed-args: true
    kv-only: true
    no-global: all
    context: all
    static-msg: true
    msg-style: lowercased
    key-naming-case: snake
  depguard:
    rules:
      nozap:
        deny:
          - pkg: "go.uber.org/zap"
            desc: "Do not use zap logger. Use slog instead."
      noerrors:
        deny:
          - pkg: "errors"
            desc: "Do not use errors package. Use github.com/SigNoz/signoz/pkg/errors instead."
  iface:
    enable:
      - identical
issues:
  exclude-dirs:
    - "pkg/query-service"
    - "ee/query-service"
    - "scripts/"
    - unused
  settings:
    depguard:
      rules:
        noerrors:
          deny:
            - pkg: errors
              desc: Do not use errors package. Use github.com/SigNoz/signoz/pkg/errors instead.
        nozap:
          deny:
            - pkg: go.uber.org/zap
              desc: Do not use zap logger. Use slog instead.
    forbidigo:
      forbid:
        - pattern: fmt.Errorf
        - pattern: ^(fmt\.Print.*|print|println)$
    iface:
      enable:
        - identical
    sloglint:
      no-mixed-args: true
      kv-only: true
      no-global: all
      context: all
      static-msg: true
      key-naming-case: snake
  exclusions:
    generated: lax
    presets:
      - comments
      - common-false-positives
      - legacy
      - std-error-handling
    paths:
      - pkg/query-service
      - ee/query-service
      - scripts/
      - tmp/
      - third_party$
      - builtin$
      - examples$
formatters:
  exclusions:
    generated: lax
    paths:
      - third_party$
      - builtin$
      - examples$
```
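Old and new configs enforce the same house rules: no stdlib `errors`, no `zap`, no `fmt.Errorf` or `fmt.Print*`, and strict `slog` usage. Below is a hedged sketch of Go code that would pass these linters, following the `pkg/errors` call shapes quoted in CLAUDE.md above; the `errors.TypeNotFound` value is an assumption.

```go
// Sketch of code that satisfies the rules above: pkg/errors instead of the
// standard errors package, slog instead of zap, and no fmt.Errorf. The slog
// call uses a context, key-value args only, a static lowercase message, and
// snake_case keys, matching the sloglint settings. errors.TypeNotFound is
// assumed; CLAUDE.md only documents the New/Newf/Wrapf call shapes.
package example

import (
	"context"
	"log/slog"

	"github.com/SigNoz/signoz/pkg/errors"
)

var CodeUserNotFound = errors.MustNewCode("user_not_found")

func findUser(ctx context.Context, logger *slog.Logger, id string) error {
	logger.InfoContext(ctx, "looking up user", "user_id", id)

	// Instead of fmt.Errorf("user %s not found", id), which forbidigo rejects:
	return errors.Newf(errors.TypeNotFound, CodeUserNotFound, "user %s not found", id)
}
```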
`.mockery.yml` (new file, 17 lines):

```yaml
# Link to template variables: https://pkg.go.dev/github.com/vektra/mockery/v3/config#TemplateData
template: testify
packages:
  github.com/SigNoz/signoz/pkg/alertmanager:
    config:
      all: true
      dir: '{{.InterfaceDir}}/alertmanagertest'
      filename: "alertmanager.go"
      structname: 'Mock{{.InterfaceName}}'
      pkgname: '{{.SrcPackageName}}test'
  github.com/SigNoz/signoz/pkg/tokenizer:
    config:
      all: true
      dir: '{{.InterfaceDir}}/tokenizertest'
      filename: "tokenizer.go"
      structname: 'Mock{{.InterfaceName}}'
      pkgname: '{{.SrcPackageName}}test'
```
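With this config, the `gen-mocks` Makefile target (added further down in this diff) generates testify-style mocks such as `tokenizertest.MockTokenizer`. A hedged usage sketch follows; the `Tokenizer` interface and its `Tokenize` method are assumptions, and only the package, file, and struct naming come from the templates above.

```go
package tokenizer_test

import (
	"testing"

	"github.com/SigNoz/signoz/pkg/tokenizer/tokenizertest"
	"github.com/stretchr/testify/assert"
)

func TestWithGeneratedMock(t *testing.T) {
	// NewMockTokenizer and EXPECT come from mockery's testify template;
	// the Tokenize method itself is a hypothetical interface method.
	mock := tokenizertest.NewMockTokenizer(t)
	mock.EXPECT().Tokenize("span").Return([]string{"span"}, nil)

	tokens, err := mock.Tokenize("span")
	assert.NoError(t, err)
	assert.Equal(t, []string{"span"}, tokens)
}
```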
`.vscode/settings.json` (new file, 13 lines):

```json
{
  "eslint.workingDirectories": ["./frontend"],
  "editor.formatOnSave": true,
  "editor.defaultFormatter": "esbenp.prettier-vscode",
  "editor.codeActionsOnSave": {
    "source.fixAll.eslint": "explicit"
  },
  "prettier.requireConfig": true,
  "[go]": {
    "editor.formatOnSave": true,
    "editor.defaultFormatter": "golang.go"
  }
}
```
`Makefile` (68 lines changed):

```
@@ -72,6 +72,12 @@ devenv-up: devenv-clickhouse devenv-signoz-otel-collector ## Start both clickhou
	@echo " - ClickHouse: http://localhost:8123"
	@echo " - Signoz OTel Collector: grpc://localhost:4317, http://localhost:4318"
+
+.PHONY: devenv-clickhouse-clean
+devenv-clickhouse-clean: ## Clean all ClickHouse data from filesystem
+	@echo "Removing ClickHouse data..."
+	@rm -rf .devenv/docker/clickhouse/fs/tmp/*
+	@echo "ClickHouse data cleaned!"

 ##############################################################
 # go commands
 ##############################################################
@@ -80,14 +86,13 @@ go-run-enterprise: ## Runs the enterprise go backend server
	@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
	SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
	SIGNOZ_WEB_ENABLED=false \
-	SIGNOZ_JWT_SECRET=secret \
+	SIGNOZ_TOKENIZER_JWT_SECRET=secret \
	SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER=cluster \
	go run -race \
-	$(GO_BUILD_CONTEXT_ENTERPRISE)/*.go \
-	--config ./conf/prometheus.yml \
-	--cluster cluster
+	$(GO_BUILD_CONTEXT_ENTERPRISE)/*.go server

 .PHONY: go-test
 go-test: ## Runs go unit tests
@@ -98,14 +103,13 @@ go-run-community: ## Runs the community go backend server
	@SIGNOZ_INSTRUMENTATION_LOGS_LEVEL=debug \
	SIGNOZ_SQLSTORE_SQLITE_PATH=signoz.db \
	SIGNOZ_WEB_ENABLED=false \
-	SIGNOZ_JWT_SECRET=secret \
+	SIGNOZ_TOKENIZER_JWT_SECRET=secret \
	SIGNOZ_ALERTMANAGER_PROVIDER=signoz \
	SIGNOZ_TELEMETRYSTORE_PROVIDER=clickhouse \
	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_DSN=tcp://127.0.0.1:9000 \
	SIGNOZ_TELEMETRYSTORE_CLICKHOUSE_CLUSTER=cluster \
	go run -race \
-	$(GO_BUILD_CONTEXT_COMMUNITY)/*.go server \
-	--config ./conf/prometheus.yml \
-	--cluster cluster
+	$(GO_BUILD_CONTEXT_COMMUNITY)/*.go server

 .PHONY: go-build-community $(GO_BUILD_ARCHS_COMMUNITY)
 go-build-community: ## Builds the go backend server for community
@@ -114,9 +118,9 @@ $(GO_BUILD_ARCHS_COMMUNITY): go-build-community-%: $(TARGET_DIR)
	@mkdir -p $(TARGET_DIR)/$(OS)-$*
	@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)-community"
	@if [ $* = "arm64" ]; then \
-		CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
+		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
	else \
-		CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
+		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_COMMUNITY) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME)-community -ldflags "-s -w $(GO_BUILD_LDFLAGS_COMMUNITY)"; \
	fi

@@ -127,9 +131,9 @@ $(GO_BUILD_ARCHS_ENTERPRISE): go-build-enterprise-%: $(TARGET_DIR)
	@mkdir -p $(TARGET_DIR)/$(OS)-$*
	@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
	@if [ $* = "arm64" ]; then \
-		CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
+		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
	else \
-		CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
+		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
	fi

 .PHONY: go-build-enterprise-race $(GO_BUILD_ARCHS_ENTERPRISE_RACE)
@@ -139,9 +143,9 @@ $(GO_BUILD_ARCHS_ENTERPRISE_RACE): go-build-enterprise-race-%: $(TARGET_DIR)
	@mkdir -p $(TARGET_DIR)/$(OS)-$*
	@echo ">> building binary $(TARGET_DIR)/$(OS)-$*/$(NAME)"
	@if [ $* = "arm64" ]; then \
-		CC=aarch64-linux-gnu-gcc CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
+		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
	else \
-		CGO_ENABLED=1 GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-linkmode external -extldflags '-static' -s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
+		GOARCH=$* GOOS=$(OS) go build -C $(GO_BUILD_CONTEXT_ENTERPRISE) -race -tags timetzdata -o $(TARGET_DIR)/$(OS)-$*/$(NAME) -ldflags "-s -w $(GO_BUILD_LDFLAGS_ENTERPRISE)"; \
	fi

 ##############################################################
@@ -198,14 +202,40 @@ docker-buildx-enterprise: go-build-enterprise js-build
 ##############################################################
 .PHONY: py-fmt
 py-fmt: ## Run black for integration tests
-	@cd tests/integration && poetry run black .
+	@cd tests/integration && uv run black .

 .PHONY: py-lint
 py-lint: ## Run lint for integration tests
-	@cd tests/integration && poetry run isort .
-	@cd tests/integration && poetry run autoflake .
-	@cd tests/integration && poetry run pylint .
+	@cd tests/integration && uv run isort .
+	@cd tests/integration && uv run autoflake .
+	@cd tests/integration && uv run pylint .

+.PHONY: py-test-setup
+py-test-setup: ## Runs integration tests
+	@cd tests/integration && uv run pytest --basetemp=./tmp/ -vv --reuse --capture=no src/bootstrap/setup.py::test_setup
+
+.PHONY: py-test-teardown
+py-test-teardown: ## Runs integration tests with teardown
+	@cd tests/integration && uv run pytest --basetemp=./tmp/ -vv --teardown --capture=no src/bootstrap/setup.py::test_teardown
+
 .PHONY: py-test
 py-test: ## Runs integration tests
-	@cd tests/integration && poetry run pytest --basetemp=./tmp/ -vv --capture=no src/
+	@cd tests/integration && uv run pytest --basetemp=./tmp/ -vv --capture=no src/

+.PHONY: py-clean
+py-clean: ## Clear all pycache and pytest cache from tests directory recursively
+	@echo ">> cleaning python cache files from tests directory"
+	@find tests -type d -name __pycache__ -exec rm -rf {} + 2>/dev/null || true
+	@find tests -type d -name .pytest_cache -exec rm -rf {} + 2>/dev/null || true
+	@find tests -type f -name "*.pyc" -delete 2>/dev/null || true
+	@find tests -type f -name "*.pyo" -delete 2>/dev/null || true
+	@echo ">> python cache cleaned"
+
+##############################################################
+# generate commands
+##############################################################
+.PHONY: gen-mocks
+gen-mocks:
+	@echo ">> Generating mocks"
+	@mockery --config .mockery.yml
```
(file path missing from this export; a markdown maintainers list)

```
@@ -236,7 +236,7 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
 #### DevOps

 - [Prashant Shahi](https://github.com/prashant-shahi)
-- [Vibhu Pandey](https://github.com/grandwizard28)
+- [Vibhu Pandey](https://github.com/therealpandey)

 <br /><br />
```
(file path missing from this export; a GoReleaser config building `./cmd/community`)

```
@@ -12,12 +12,6 @@ builds:
   - id: signoz
     binary: bin/signoz
     main: ./cmd/community
-    env:
-      - CGO_ENABLED=1
-      - >-
-        {{- if eq .Os "linux" }}
-        {{- if eq .Arch "arm64" }}CC=aarch64-linux-gnu-gcc{{- end }}
-        {{- end }}
     goos:
       - linux
       - darwin
@@ -36,8 +30,6 @@ builds:
       - -X github.com/SigNoz/signoz/pkg/version.time={{ .CommitTimestamp }}
       - -X github.com/SigNoz/signoz/pkg/version.branch={{ .Branch }}
      - -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr
-      - >-
-        {{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
     mod_timestamp: "{{ .CommitTimestamp }}"
     tags:
       - timetzdata
```
(file path missing from this export; a Go `main.go` registering CLI commands)

```
@@ -13,6 +13,7 @@ func main() {

 	// register a list of commands to the root command
 	registerServer(cmd.RootCmd, logger)
+	cmd.RegisterGenerate(cmd.RootCmd, logger)

 	cmd.Execute(logger)
 }
```
(file path missing from this export; a Go server entrypoint in `package main`. The import list in the first hunk mixes old and new imports and is shown unmarked; markers elsewhere are inferred from the hunk line counts.)

```
@@ -3,20 +3,28 @@ package main
 import (
 	"context"
 	"log/slog"
 	"time"

 	"github.com/SigNoz/signoz/cmd"
 	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
 	"github.com/SigNoz/signoz/pkg/analytics"
 	"github.com/SigNoz/signoz/pkg/authn"
 	"github.com/SigNoz/signoz/pkg/authz"
 	"github.com/SigNoz/signoz/pkg/authz/openfgaauthz"
 	"github.com/SigNoz/signoz/pkg/authz/openfgaschema"
 	"github.com/SigNoz/signoz/pkg/factory"
 	"github.com/SigNoz/signoz/pkg/gateway"
 	"github.com/SigNoz/signoz/pkg/gateway/noopgateway"
 	"github.com/SigNoz/signoz/pkg/licensing"
 	"github.com/SigNoz/signoz/pkg/licensing/nooplicensing"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard"
 	"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
 	"github.com/SigNoz/signoz/pkg/modules/organization"
 	"github.com/SigNoz/signoz/pkg/modules/role"
 	"github.com/SigNoz/signoz/pkg/querier"
 	"github.com/SigNoz/signoz/pkg/query-service/app"
 	"github.com/SigNoz/signoz/pkg/queryparser"
 	"github.com/SigNoz/signoz/pkg/signoz"
 	"github.com/SigNoz/signoz/pkg/sqlschema"
 	"github.com/SigNoz/signoz/pkg/sqlstore"
 	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstorehook"
 	"github.com/SigNoz/signoz/pkg/types/authtypes"
 	"github.com/SigNoz/signoz/pkg/version"
 	"github.com/SigNoz/signoz/pkg/zeus"
@@ -49,19 +57,9 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
 	// print the version
 	version.Info.PrettyPrint(config.Version)

-	// add enterprise sqlstore factories to the community sqlstore factories
-	sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
-	if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
-		logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
-		return err
-	}
-
-	jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour)
-
 	signoz, err := signoz.New(
 		ctx,
 		config,
-		jwt,
 		zeus.Config{},
 		noopzeus.NewProviderFactory(),
 		licensing.Config{},
@@ -76,13 +74,25 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
 		},
 		signoz.NewSQLStoreProviderFactories(),
 		signoz.NewTelemetryStoreProviderFactories(),
+		func(ctx context.Context, providerSettings factory.ProviderSettings, store authtypes.AuthNStore, licensing licensing.Licensing) (map[authtypes.AuthNProvider]authn.AuthN, error) {
+			return signoz.NewAuthNs(ctx, providerSettings, store, licensing)
+		},
+		func(ctx context.Context, sqlstore sqlstore.SQLStore) factory.ProviderFactory[authz.AuthZ, authz.Config] {
+			return openfgaauthz.NewProviderFactory(sqlstore, openfgaschema.NewSchema().Get(ctx))
+		},
+		func(store sqlstore.SQLStore, settings factory.ProviderSettings, analytics analytics.Analytics, orgGetter organization.Getter, _ role.Module, queryParser queryparser.QueryParser, _ querier.Querier, _ licensing.Licensing) dashboard.Module {
+			return impldashboard.NewModule(impldashboard.NewStore(store), settings, analytics, orgGetter, queryParser)
+		},
+		func(_ licensing.Licensing) factory.ProviderFactory[gateway.Gateway, gateway.Config] {
+			return noopgateway.NewProviderFactory()
+		},
 	)
 	if err != nil {
 		logger.ErrorContext(ctx, "failed to create signoz", "error", err)
 		return err
 	}

-	server, err := app.NewServer(config, signoz, jwt)
+	server, err := app.NewServer(config, signoz)
 	if err != nil {
 		logger.ErrorContext(ctx, "failed to create server", "error", err)
 		return err
```
(file path missing from this export; `package cmd`)

```
@@ -3,7 +3,6 @@ package cmd
 import (
 	"context"
 	"log/slog"
-	"os"

 	"github.com/SigNoz/signoz/pkg/config"
 	"github.com/SigNoz/signoz/pkg/config/envprovider"
@@ -30,12 +29,3 @@ func NewSigNozConfig(ctx context.Context, logger *slog.Logger, flags signoz.Depr

 	return config, nil
 }
-
-func NewJWTSecret(ctx context.Context, logger *slog.Logger) string {
-	jwtSecret := os.Getenv("SIGNOZ_JWT_SECRET")
-	if len(jwtSecret) == 0 {
-		logger.ErrorContext(ctx, "🚨 CRITICAL SECURITY ISSUE: No JWT secret key specified!", "error", "SIGNOZ_JWT_SECRET environment variable is not set. This has dire consequences for the security of the application. Without a JWT secret, user sessions are vulnerable to tampering and unauthorized access. Please set the SIGNOZ_JWT_SECRET environment variable immediately. For more information, please refer to https://github.com/SigNoz/signoz/issues/8400.")
-	}
-
-	return jwtSecret
-}
```
@@ -12,12 +12,6 @@ builds:
  - id: signoz
    binary: bin/signoz
    main: ./cmd/enterprise
    env:
      - CGO_ENABLED=1
      - >-
        {{- if eq .Os "linux" }}
        {{- if eq .Arch "arm64" }}CC=aarch64-linux-gnu-gcc{{- end }}
        {{- end }}
    goos:
      - linux
      - darwin
@@ -37,11 +31,8 @@ builds:
      - -X github.com/SigNoz/signoz/pkg/version.branch={{ .Branch }}
      - -X github.com/SigNoz/signoz/ee/zeus.url=https://api.signoz.cloud
      - -X github.com/SigNoz/signoz/ee/zeus.deprecatedURL=https://license.signoz.io
      - -X github.com/SigNoz/signoz/ee/query-service/constants.ZeusURL=https://api.signoz.cloud
      - -X github.com/SigNoz/signoz/ee/query-service/constants.LicenseSignozIo=https://license.signoz.io/api/v1
      - -X github.com/SigNoz/signoz/pkg/analytics.key=9kRrJ7oPCGPEJLF6QjMPLt5bljFhRQBr
      - >-
        {{- if eq .Os "linux" }}-linkmode external -extldflags '-static'{{- end }}
    mod_timestamp: "{{ .CommitTimestamp }}"
    tags:
      - timetzdata
@@ -1,11 +1,3 @@
FROM node:18-bullseye AS build

WORKDIR /opt/
COPY ./frontend/ ./
ENV NODE_OPTIONS=--max-old-space-size=8192
RUN CI=1 yarn install
RUN CI=1 yarn build

FROM golang:1.24-bullseye

ARG OS="linux"
@@ -40,8 +32,6 @@ COPY Makefile Makefile
RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race
RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz

COPY --from=build /opt/build ./web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["/root/signoz", "server"]
47
cmd/enterprise/Dockerfile.with-web.integration
Normal file
@@ -0,0 +1,47 @@
FROM node:18-bullseye AS build

WORKDIR /opt/
COPY ./frontend/ ./
ENV NODE_OPTIONS=--max-old-space-size=8192
RUN CI=1 yarn install
RUN CI=1 yarn build

FROM golang:1.24-bullseye

ARG OS="linux"
ARG TARGETARCH
ARG ZEUSURL

# This path is important for stacktraces
WORKDIR $GOPATH/src/github.com/signoz/signoz
WORKDIR /root

RUN set -eux; \
	apt-get update; \
	apt-get install -y --no-install-recommends \
		g++ \
		gcc \
		libc6-dev \
		make \
		pkg-config \
	; \
	rm -rf /var/lib/apt/lists/*

COPY go.mod go.sum ./

RUN go mod download

COPY ./cmd/ ./cmd/
COPY ./ee/ ./ee/
COPY ./pkg/ ./pkg/
COPY ./templates/email /root/templates

COPY Makefile Makefile
RUN TARGET_DIR=/root ARCHS=${TARGETARCH} ZEUS_URL=${ZEUSURL} LICENSE_URL=${ZEUSURL}/api/v1 make go-build-enterprise-race
RUN mv /root/linux-${TARGETARCH}/signoz /root/signoz

COPY --from=build /opt/build ./web/

RUN chmod 755 /root /root/signoz

ENTRYPOINT ["/root/signoz", "server"]
@@ -13,6 +13,7 @@ func main() {

	// register a list of commands to the root command
	registerServer(cmd.RootCmd, logger)
	cmd.RegisterGenerate(cmd.RootCmd, logger)

	cmd.Execute(logger)
}
@@ -6,17 +6,31 @@ import (
	"time"

	"github.com/SigNoz/signoz/cmd"
	"github.com/SigNoz/signoz/ee/authn/callbackauthn/oidccallbackauthn"
	"github.com/SigNoz/signoz/ee/authn/callbackauthn/samlcallbackauthn"
	"github.com/SigNoz/signoz/ee/authz/openfgaauthz"
	"github.com/SigNoz/signoz/ee/authz/openfgaschema"
	"github.com/SigNoz/signoz/ee/gateway/httpgateway"
	enterpriselicensing "github.com/SigNoz/signoz/ee/licensing"
	"github.com/SigNoz/signoz/ee/licensing/httplicensing"
	"github.com/SigNoz/signoz/ee/modules/dashboard/impldashboard"
	enterpriseapp "github.com/SigNoz/signoz/ee/query-service/app"
	"github.com/SigNoz/signoz/ee/sqlschema/postgressqlschema"
	"github.com/SigNoz/signoz/ee/sqlstore/postgressqlstore"
	enterprisezeus "github.com/SigNoz/signoz/ee/zeus"
	"github.com/SigNoz/signoz/ee/zeus/httpzeus"
	"github.com/SigNoz/signoz/pkg/analytics"
	"github.com/SigNoz/signoz/pkg/authn"
	"github.com/SigNoz/signoz/pkg/authz"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/gateway"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/modules/dashboard"
	pkgimpldashboard "github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	"github.com/SigNoz/signoz/pkg/modules/role"
	"github.com/SigNoz/signoz/pkg/querier"
	"github.com/SigNoz/signoz/pkg/queryparser"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/sqlschema"
	"github.com/SigNoz/signoz/pkg/sqlstore"
@@ -54,17 +68,14 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e

	// add enterprise sqlstore factories to the community sqlstore factories
	sqlstoreFactories := signoz.NewSQLStoreProviderFactories()
	if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory())); err != nil {
	if err := sqlstoreFactories.Add(postgressqlstore.NewFactory(sqlstorehook.NewLoggingFactory(), sqlstorehook.NewInstrumentationFactory())); err != nil {
		logger.ErrorContext(ctx, "failed to add postgressqlstore factory", "error", err)
		return err
	}

	jwt := authtypes.NewJWT(cmd.NewJWTSecret(ctx, logger), 30*time.Minute, 30*24*time.Hour)

	signoz, err := signoz.New(
		ctx,
		config,
		jwt,
		enterprisezeus.Config(),
		httpzeus.NewProviderFactory(),
		enterpriselicensing.Config(24*time.Hour, 3),
@@ -84,13 +95,43 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
		},
		sqlstoreFactories,
		signoz.NewTelemetryStoreProviderFactories(),
		func(ctx context.Context, providerSettings factory.ProviderSettings, store authtypes.AuthNStore, licensing licensing.Licensing) (map[authtypes.AuthNProvider]authn.AuthN, error) {
			samlCallbackAuthN, err := samlcallbackauthn.New(ctx, store, licensing)
			if err != nil {
				return nil, err
			}

			oidcCallbackAuthN, err := oidccallbackauthn.New(store, licensing, providerSettings)
			if err != nil {
				return nil, err
			}

			authNs, err := signoz.NewAuthNs(ctx, providerSettings, store, licensing)
			if err != nil {
				return nil, err
			}

			authNs[authtypes.AuthNProviderSAML] = samlCallbackAuthN
			authNs[authtypes.AuthNProviderOIDC] = oidcCallbackAuthN

			return authNs, nil
		},
		func(ctx context.Context, sqlstore sqlstore.SQLStore) factory.ProviderFactory[authz.AuthZ, authz.Config] {
			return openfgaauthz.NewProviderFactory(sqlstore, openfgaschema.NewSchema().Get(ctx))
		},
		func(store sqlstore.SQLStore, settings factory.ProviderSettings, analytics analytics.Analytics, orgGetter organization.Getter, role role.Module, queryParser queryparser.QueryParser, querier querier.Querier, licensing licensing.Licensing) dashboard.Module {
			return impldashboard.NewModule(pkgimpldashboard.NewStore(store), settings, analytics, orgGetter, role, queryParser, querier, licensing)
		},
		func(licensing licensing.Licensing) factory.ProviderFactory[gateway.Gateway, gateway.Config] {
			return httpgateway.NewProviderFactory(licensing)
		},
	)
	if err != nil {
		logger.ErrorContext(ctx, "failed to create signoz", "error", err)
		return err
	}

	server, err := enterpriseapp.NewServer(config, signoz, jwt)
	server, err := enterpriseapp.NewServer(config, signoz)
	if err != nil {
		logger.ErrorContext(ctx, "failed to create server", "error", err)
		return err
21
cmd/generate.go
Normal file
@@ -0,0 +1,21 @@
package cmd

import (
	"log/slog"

	"github.com/spf13/cobra"
)

func RegisterGenerate(parentCmd *cobra.Command, logger *slog.Logger) {
	var generateCmd = &cobra.Command{
		Use: "generate",
		Short: "Generate artifacts",
		SilenceUsage: true,
		SilenceErrors: true,
		CompletionOptions: cobra.CompletionOptions{DisableDefaultCmd: true},
	}

	registerGenerateOpenAPI(generateCmd)

	parentCmd.AddCommand(generateCmd)
}
41
cmd/openapi.go
Normal file
@@ -0,0 +1,41 @@
package cmd

import (
	"context"
	"log/slog"

	"github.com/SigNoz/signoz/pkg/instrumentation"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/version"
	"github.com/spf13/cobra"
)

func registerGenerateOpenAPI(parentCmd *cobra.Command) {
	openapiCmd := &cobra.Command{
		Use: "openapi",
		Short: "Generate OpenAPI schema for SigNoz",
		RunE: func(currCmd *cobra.Command, args []string) error {
			return runGenerateOpenAPI(currCmd.Context())
		},
	}

	parentCmd.AddCommand(openapiCmd)
}

func runGenerateOpenAPI(ctx context.Context) error {
	instrumentation, err := instrumentation.New(ctx, instrumentation.Config{Logs: instrumentation.LogsConfig{Level: slog.LevelInfo}}, version.Info, "signoz")
	if err != nil {
		return err
	}

	openapi, err := signoz.NewOpenAPI(ctx, instrumentation)
	if err != nil {
		return err
	}

	if err := openapi.CreateAndWrite("docs/api/openapi.yml"); err != nil {
		return err
	}

	return nil
}
97
cmd/zap.go
@@ -1,6 +1,10 @@
package cmd

import (
	"context"
	"time"

	"github.com/SigNoz/signoz/pkg/errors"
	"go.uber.org/zap" //nolint:depguard
	"go.uber.org/zap/zapcore" //nolint:depguard
)
@@ -10,6 +14,97 @@ func newZapLogger() *zap.Logger {
	config := zap.NewProductionConfig()
	config.EncoderConfig.TimeKey = "timestamp"
	config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

	// Extract sampling config before building the logger.
	// We need to disable sampling in the config and apply it manually later
	// to ensure correct core ordering. See filteringCore documentation for details.
	samplerConfig := config.Sampling
	config.Sampling = nil

	logger, _ := config.Build()
	return logger

	// Wrap with custom core wrapping to filter certain log entries.
	// The order of wrapping is important:
	// 1. First wrap with filteringCore
	// 2. Then wrap with sampler
	//
	// This creates the call chain: sampler -> filteringCore -> ioCore
	//
	// During logging:
	// - sampler.Check decides whether to sample the log entry
	// - If sampled, filteringCore.Check is called
	// - filteringCore adds itself to CheckedEntry.cores
	// - All cores in CheckedEntry.cores have their Write method called
	// - filteringCore.Write can now filter the entry before passing to ioCore
	//
	// If we didn't disable the sampler above, filteringCore would have wrapped
	// sampler. By calling sampler.Check we would have allowed it to call
	// ioCore.Check that adds itself to CheckedEntry.cores. Then ioCore.Write
	// would have bypassed our checks, making filtering impossible.
	return logger.WithOptions(zap.WrapCore(func(core zapcore.Core) zapcore.Core {
		core = &filteringCore{core}
		if samplerConfig != nil {
			core = zapcore.NewSamplerWithOptions(
				core,
				time.Second,
				samplerConfig.Initial,
				samplerConfig.Thereafter,
			)
		}
		return core
	}))
}

// filteringCore wraps a zapcore.Core to filter out log entries based on a
// custom logic.
//
// Note: This core must be positioned before the sampler in the core chain
// to ensure Write is called. See newZapLogger for ordering details.
type filteringCore struct {
	zapcore.Core
}

// filter determines whether a log entry should be written based on its fields.
// Returns false if the entry should be suppressed, true otherwise.
//
// Current filters:
// - context.Canceled: These are expected errors from cancelled operations,
// and create noise in logs.
func (c *filteringCore) filter(fields []zapcore.Field) bool {
	for _, field := range fields {
		if field.Type == zapcore.ErrorType {
			if loggedErr, ok := field.Interface.(error); ok {
				// Suppress logs containing context.Canceled errors
				if errors.Is(loggedErr, context.Canceled) {
					return false
				}
			}
		}
	}
	return true
}

// With implements zapcore.Core.With
// It returns a new copy with the added context.
func (c *filteringCore) With(fields []zapcore.Field) zapcore.Core {
	return &filteringCore{c.Core.With(fields)}
}

// Check implements zapcore.Core.Check.
// It adds this core to the CheckedEntry if the log level is enabled,
// ensuring that Write will be called for this entry.
func (c *filteringCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
	if c.Enabled(ent.Level) {
		return ce.AddCore(ent, c)
	}
	return ce
}

// Write implements zapcore.Core.Write.
// It filters log entries based on their fields before delegating to the wrapped core.
func (c *filteringCore) Write(ent zapcore.Entry, fields []zapcore.Field) error {
	if !c.filter(fields) {
		return nil
	}
	return c.Core.Write(ent, fields)
}
@@ -1,8 +1,15 @@
##################### SigNoz Configuration Example #####################
#
#
# Do not modify this file
#

##################### Global #####################
global:
  # the url under which the signoz apiserver is externally reachable.
  external_url: <unset>
  # the url where the SigNoz backend receives telemetry data (traces, metrics, logs) from instrumented applications.
  ingestion_url: <unset>

##################### Version #####################
version:
  banner:
@@ -47,10 +54,10 @@ cache:
  provider: memory
  # memory: Uses in-memory caching.
  memory:
    # Time-to-live for cache entries in memory. Specify the duration in ns
    ttl: 60000000000
    # The interval at which the cache will be cleaned up
    cleanup_interval: 1m
    # Max items for the in-memory cache (10x the entries)
    num_counters: 100000
    # Total cost in bytes allocated bounded cache
    max_cost: 67108864
  # redis: Uses Redis as the caching backend.
  redis:
    # The hostname or IP address of the Redis server.
@@ -58,7 +65,7 @@ cache:
    # The port on which the Redis server is running. Default is usually 6379.
    port: 6379
    # The password for authenticating with the Redis server, if required.
    password:
    password:
    # The Redis database number to use
    db: 0

@@ -71,6 +78,10 @@ sqlstore:
  sqlite:
    # The path to the SQLite database file.
    path: /var/lib/signoz/signoz.db
    # Mode is the mode to use for the sqlite database.
    mode: delete
    # BusyTimeout is the timeout for the sqlite database to wait for a lock.
    busy_timeout: 10s

##################### APIServer #####################
apiserver:
@@ -238,8 +249,45 @@ statsreporter:
  # Whether to collect identities and traits (emails).
  identities: true

##################### Gateway (License only) #####################
gateway:
  # The URL of the gateway's api.
  url: http://localhost:8080

##################### Tokenizer #####################
tokenizer:
  # Specifies the tokenizer provider to use.
  provider: jwt
  lifetime:
    # The duration for which a user can be idle before being required to authenticate.
    idle: 168h
    # The duration for which a user can remain logged in before being asked to login.
    max: 720h
  rotation:
    # The interval to rotate tokens in.
    interval: 30m
    # The duration for which the previous token pair remains valid after a token pair is rotated.
    duration: 60s
  jwt:
    # The secret to sign the JWT tokens.
    secret: secret
  opaque:
    gc:
      # The interval to perform garbage collection.
      interval: 1h
    token:
      # The maximum number of tokens a user can have. This limits the number of concurrent sessions a user can have.
      max_per_user: 5

##################### Flagger #####################
flagger:
  # Config are the overrides for the feature flags which come directly from the config file.
  config:
    boolean:
      use_span_metrics: true
      interpolation_enabled: false
      kafka_span_eval: false
    string:
    float:
    integer:
    object:
4
context7.json
Normal file
@@ -0,0 +1,4 @@
{
  "url": "https://context7.com/signoz/signoz",
  "public_key": "pk_6g9GfjdkuPEIDuTGAxnol"
}
@@ -176,7 +176,7 @@ services:
  # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
    image: signoz/signoz:v0.96.1
    image: signoz/signoz:v0.108.0
    command:
      - --config=/root/config/prometheus.yml
    ports:
@@ -195,7 +195,7 @@ services:
      - GODEBUG=netdns=go
      - TELEMETRY_ENABLED=true
      - DEPLOYMENT_TYPE=docker-swarm
      - SIGNOZ_JWT_SECRET=secret
      - SIGNOZ_TOKENIZER_JWT_SECRET=secret
      - DOT_METRICS_ENABLED=true
    healthcheck:
      test:
@@ -209,7 +209,7 @@ services:
      retries: 3
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:v0.129.6
    image: signoz/signoz-otel-collector:v0.129.12
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
@@ -233,7 +233,7 @@ services:
      - signoz
  schema-migrator:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:v0.129.6
    image: signoz/signoz-schema-migrator:v0.129.12
    deploy:
      restart_policy:
        condition: on-failure

@@ -117,7 +117,7 @@ services:
  # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
    image: signoz/signoz:v0.96.1
    image: signoz/signoz:v0.108.0
    command:
      - --config=/root/config/prometheus.yml
    ports:
@@ -150,7 +150,7 @@ services:
      retries: 3
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:v0.129.6
    image: signoz/signoz-otel-collector:v0.129.12
    command:
      - --config=/etc/otel-collector-config.yaml
      - --manager-config=/etc/manager-config.yaml
@@ -176,7 +176,7 @@ services:
      - signoz
  schema-migrator:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:v0.129.6
    image: signoz/signoz-schema-migrator:v0.129.12
    deploy:
      restart_policy:
        condition: on-failure
@@ -1,3 +1,10 @@
connectors:
  signozmeter:
    metrics_flush_interval: 1h
    dimensions:
      - name: service.name
      - name: deployment.environment
      - name: host.name
receivers:
  otlp:
    protocols:
@@ -21,6 +28,10 @@ processors:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  batch/meter:
    send_batch_max_size: 25000
    send_batch_size: 20000
    timeout: 1s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system]
@@ -66,6 +77,11 @@ exporters:
    dsn: tcp://clickhouse:9000/signoz_logs
    timeout: 10s
    use_new_schema: true
  signozclickhousemeter:
    dsn: tcp://clickhouse:9000/signoz_meter
    timeout: 45s
    sending_queue:
      enabled: false
service:
  telemetry:
    logs:
@@ -77,16 +93,20 @@ service:
    traces:
      receivers: [otlp]
      processors: [signozspanmetrics/delta, batch]
      exporters: [clickhousetraces]
      exporters: [clickhousetraces, signozmeter]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [signozclickhousemetrics]
      exporters: [signozclickhousemetrics, signozmeter]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [signozclickhousemetrics]
      exporters: [signozclickhousemetrics, signozmeter]
    logs:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhouselogsexporter]
      exporters: [clickhouselogsexporter, signozmeter]
    metrics/meter:
      receivers: [signozmeter]
      processors: [batch/meter]
      exporters: [signozclickhousemeter]
@@ -179,7 +179,7 @@ services:
  # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
    image: signoz/signoz:${VERSION:-v0.96.1}
    image: signoz/signoz:${VERSION:-v0.108.0}
    container_name: signoz
    command:
      - --config=/root/config/prometheus.yml
@@ -213,7 +213,7 @@ services:
  # TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.6}
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.12}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
@@ -239,7 +239,7 @@ services:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.12}
    container_name: schema-migrator-sync
    command:
      - sync
@@ -250,7 +250,7 @@ services:
        condition: service_healthy
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.12}
    container_name: schema-migrator-async
    command:
      - async

@@ -111,7 +111,7 @@ services:
  # - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
  signoz:
    !!merge <<: *db-depend
    image: signoz/signoz:${VERSION:-v0.96.1}
    image: signoz/signoz:${VERSION:-v0.108.0}
    container_name: signoz
    command:
      - --config=/root/config/prometheus.yml
@@ -144,7 +144,7 @@ services:
      retries: 3
  otel-collector:
    !!merge <<: *db-depend
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.6}
    image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.12}
    container_name: signoz-otel-collector
    command:
      - --config=/etc/otel-collector-config.yaml
@@ -166,7 +166,7 @@ services:
        condition: service_healthy
  schema-migrator-sync:
    !!merge <<: *common
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.12}
    container_name: schema-migrator-sync
    command:
      - sync
@@ -178,7 +178,7 @@ services:
    restart: on-failure
  schema-migrator-async:
    !!merge <<: *db-depend
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
    image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.12}
    container_name: schema-migrator-async
    command:
      - async
@@ -1,3 +1,10 @@
connectors:
  signozmeter:
    metrics_flush_interval: 1h
    dimensions:
      - name: service.name
      - name: deployment.environment
      - name: host.name
receivers:
  otlp:
    protocols:
@@ -21,6 +28,10 @@ processors:
    send_batch_size: 10000
    send_batch_max_size: 11000
    timeout: 10s
  batch/meter:
    send_batch_max_size: 25000
    send_batch_size: 20000
    timeout: 1s
  resourcedetection:
    # Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
    detectors: [env, system]
@@ -66,6 +77,11 @@ exporters:
    dsn: tcp://clickhouse:9000/signoz_logs
    timeout: 10s
    use_new_schema: true
  signozclickhousemeter:
    dsn: tcp://clickhouse:9000/signoz_meter
    timeout: 45s
    sending_queue:
      enabled: false
service:
  telemetry:
    logs:
@@ -77,16 +93,20 @@ service:
    traces:
      receivers: [otlp]
      processors: [signozspanmetrics/delta, batch]
      exporters: [clickhousetraces]
      exporters: [clickhousetraces, signozmeter]
    metrics:
      receivers: [otlp]
      processors: [batch]
      exporters: [signozclickhousemetrics]
      exporters: [signozclickhousemetrics, signozmeter]
    metrics/prometheus:
      receivers: [prometheus]
      processors: [batch]
      exporters: [signozclickhousemetrics]
      exporters: [signozclickhousemetrics, signozmeter]
    logs:
      receivers: [otlp]
      processors: [batch]
      exporters: [clickhouselogsexporter]
      exporters: [clickhouselogsexporter, signozmeter]
    metrics/meter:
      receivers: [signozmeter]
      processors: [batch/meter]
      exporters: [signozclickhousemeter]
4063
docs/api/openapi.yml
Normal file
@@ -13,8 +13,6 @@ Before diving in, make sure you have these tools installed:
  - Download from [go.dev/dl](https://go.dev/dl/)
  - Check [go.mod](../../go.mod#L3) for the minimum version

- **GCC** - Required for CGO dependencies
  - Download from [gcc.gnu.org](https://gcc.gnu.org/)

- **Node** - Powers our frontend
  - Download from [nodejs.org](https://nodejs.org)
134
docs/contributing/go/flagger.md
Normal file
@@ -0,0 +1,134 @@
# Flagger

Flagger is SigNoz's feature flagging system built on top of the [OpenFeature](https://openfeature.dev/) standard. It provides a unified interface for evaluating feature flags across the application, allowing features to be enabled, disabled, or configured dynamically without code changes.

> 💡 **Note**: OpenFeature is a CNCF project that provides a vendor-agnostic feature flagging API, making it easy to switch providers without changing application code.

## How does it work?

Flagger consists of three main components:

1. **Registry** (`pkg/flagger/registry.go`) - Contains all available feature flags with their metadata and default values
2. **Flagger** (`pkg/flagger/flagger.go`) - The consumer-facing interface for evaluating feature flags
3. **Providers** (`pkg/flagger/<provider>flagger/`) - Implementations that supply feature flag values (e.g., `configflagger` for config-based flags)

The evaluation flow works as follows (a sketch of steps 3-5 follows the list):

1. The caller requests a feature flag value via the `Flagger` interface
2. Flagger checks the registry to validate the flag exists and get its default value
3. Each registered provider is queried for an override value
4. If a provider returns a value different from the default, that value is returned
5. Otherwise, the default value from the registry is returned
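
To make the provider ordering concrete, here is a minimal sketch of steps 3-5. It is illustrative only: the `Provider` interface and `evaluateBoolean` helper below are assumptions made for this sketch, not the actual types in `pkg/flagger/flagger.go`.

```go
package flagger

import "context"

// Provider is a hypothetical stand-in for a flagger provider in this sketch.
type Provider interface {
	Boolean(ctx context.Context, flag string, defaultValue bool) (bool, error)
}

// evaluateBoolean mirrors steps 3-5 above: query providers in order and
// return the first non-default value, otherwise fall back to the default
// that the registry supplied.
func evaluateBoolean(ctx context.Context, flag string, defaultValue bool, providers []Provider) bool {
	for _, p := range providers {
		value, err := p.Boolean(ctx, flag, defaultValue)
		if err != nil {
			continue // in this sketch, a failing provider is simply skipped
		}
		if value != defaultValue {
			return value // first non-default override wins
		}
	}
	return defaultValue // no override: the registry default is returned
}
```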

## How to add a new feature flag?

### 1. Register the flag in the registry

Add your feature flag definition in `pkg/flagger/registry.go`:

```go
var (
	// Export the feature name for use in evaluations
	FeatureMyNewFeature = featuretypes.MustNewName("my_new_feature")
)

func MustNewRegistry() featuretypes.Registry {
	registry, err := featuretypes.NewRegistry(
		// ...existing features...
		&featuretypes.Feature{
			Name: FeatureMyNewFeature,
			Kind: featuretypes.KindBoolean, // or KindString, KindFloat, KindInt, KindObject
			Stage: featuretypes.StageStable, // or StageAlpha, StageBeta
			Description: "Controls whether my new feature is enabled",
			DefaultVariant: featuretypes.MustNewName("disabled"),
			Variants: featuretypes.NewBooleanVariants(),
		},
	)
	// ...
}
```

> 💡 **Note**: Feature names must match the regex `^[a-z_]+$` (lowercase letters and underscores only).

### 2. Configure the feature flag value (optional)

To override the default value, add an entry in your configuration file:

```yaml
flagger:
  config:
    boolean:
      my_new_feature: true
```

Supported configuration types:

| Type | Config Key | Go Type |
|------|------------|---------|
| Boolean | `boolean` | `bool` |
| String | `string` | `string` |
| Float | `float` | `float64` |
| Integer | `integer` | `int64` |
| Object | `object` | `any` |

## How to evaluate a feature flag?

Use the `Flagger` interface to evaluate feature flags. The interface provides typed methods for each value type:

```go
import (
	"github.com/SigNoz/signoz/pkg/flagger"
	"github.com/SigNoz/signoz/pkg/types/featuretypes"
)

func DoSomething(ctx context.Context, flagger flagger.Flagger) error {
	// Create an evaluation context (typically with org ID)
	evalCtx := featuretypes.NewFlaggerEvaluationContext(orgID)

	// Evaluate with error handling
	enabled, err := flagger.Boolean(ctx, flagger.FeatureMyNewFeature, evalCtx)
	if err != nil {
		return err
	}

	if enabled {
		// Feature is enabled
	}

	return nil
}
```

### Empty variants

For cases where you want to use a default value on error (and log the error), use the `*OrEmpty` methods:

```go
func DoSomething(ctx context.Context, flagger flagger.Flagger) {
	evalCtx := featuretypes.NewFlaggerEvaluationContext(orgID)

	// Returns false on error and logs the error
	if flagger.BooleanOrEmpty(ctx, flagger.FeatureMyNewFeature, evalCtx) {
		// Feature is enabled
	}
}
```

### Available evaluation methods

| Method | Return Type | Empty Variant Default |
|--------|-------------|---------------------|
| `Boolean()` | `(bool, error)` | `false` |
| `String()` | `(string, error)` | `""` |
| `Float()` | `(float64, error)` | `0.0` |
| `Int()` | `(int64, error)` | `0` |
| `Object()` | `(any, error)` | `struct{}{}` |

## What should I remember?

- Always define feature flags in the registry (`pkg/flagger/registry.go`) before using them
- Use descriptive feature names that clearly indicate what the flag controls
- Prefer `*OrEmpty` methods for non-critical features to avoid error handling overhead
- Export feature name variables (e.g., `FeatureMyNewFeature`) for type-safe usage across packages
- Consider the feature's lifecycle stage (`Alpha`, `Beta`, `Stable`) when defining it
- Providers are evaluated in order; the first non-default value wins
179
docs/contributing/go/handler.md
Normal file
@@ -0,0 +1,179 @@
# Handler

Handlers in SigNoz are responsible for exposing module functionality over HTTP. They are thin adapters that:

- Decode incoming HTTP requests
- Call the appropriate module layer
- Return structured responses (or errors) in a consistent format
- Describe themselves for OpenAPI generation

They are **not** the place for complex business logic; that belongs in modules (for example, `pkg/modules/user`, `pkg/modules/session`, etc).

## How are handlers structured?

At a high level, a typical flow looks like this:

1. A `Handler` interface is defined in the module (for example, `user.Handler`, `session.Handler`, `organization.Handler`).
2. The `apiserver` provider wires those handlers into HTTP routes using Gorilla `mux.Router`.

Each route wraps a module handler method with the following:
- Authorization middleware (from `pkg/http/middleware`)
- A generic HTTP `handler.Handler` (from `pkg/http/handler`)
- An `OpenAPIDef` that describes the operation for OpenAPI generation

For example, in `pkg/apiserver/signozapiserver`:

```go
if err := router.Handle("/api/v1/invite", handler.New(
	provider.authZ.AdminAccess(provider.userHandler.CreateInvite),
	handler.OpenAPIDef{
		ID: "CreateInvite",
		Tags: []string{"users"},
		Summary: "Create invite",
		Description: "This endpoint creates an invite for a user",
		Request: new(types.PostableInvite),
		RequestContentType: "application/json",
		Response: new(types.Invite),
		ResponseContentType: "application/json",
		SuccessStatusCode: http.StatusCreated,
		ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusConflict},
		Deprecated: false,
		SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
	},
)).Methods(http.MethodPost).GetError(); err != nil {
	return err
}
```

In this pattern:

- `provider.userHandler.CreateInvite` is a handler method.
- `provider.authZ.AdminAccess(...)` wraps that method with authorization checks and context setup.
- `handler.New` converts it into an HTTP handler and wires it to OpenAPI via the `OpenAPIDef`.

## How to write a new handler method?

When adding a new endpoint:

1. Add a method to the appropriate module `Handler` interface.
2. Implement that method in the module.
3. Register the method in `signozapiserver` with the correct route, HTTP method, auth, and `OpenAPIDef`.

### 1. Extend an existing `Handler` interface or create a new one

Find the module in `pkg/modules/<name>` and extend its `Handler` interface with a new method that receives an `http.ResponseWriter` and `*http.Request`. For example:

```go
type Handler interface {
	// existing methods...
	CreateThing(rw http.ResponseWriter, req *http.Request)
}
```

Keep the method focused on HTTP concerns and delegate business logic to the module.

### 2. Implement the handler method

In the module implementation, implement the new method. A typical implementation:

- Extracts authentication and organization context from `req.Context()`
- Decodes the request body into a `types.*` struct using the `binding` package
- Calls module functions
- Uses the `render` package to write responses or errors

```go
func (h *handler) CreateThing(rw http.ResponseWriter, req *http.Request) {
	// Extract authentication and organization context from req.Context()
	claims, err := authtypes.ClaimsFromContext(req.Context())
	if err != nil {
		render.Error(rw, err)
		return
	}

	// Decode the request body into a `types.*` struct using the `binding` package
	var in types.PostableThing
	if err := binding.JSON.BindBody(req.Body, &in); err != nil {
		render.Error(rw, err)
		return
	}

	// Call module functions
	out, err := h.module.CreateThing(req.Context(), claims.OrgID, &in)
	if err != nil {
		render.Error(rw, err)
		return
	}

	// Use the `render` package to write responses or errors
	render.Success(rw, http.StatusCreated, out)
}
```

### 3. Register the handler in `signozapiserver`

In `pkg/apiserver/signozapiserver`, add a route in the appropriate `add*Routes` function (`addUserRoutes`, `addSessionRoutes`, `addOrgRoutes`, etc.). The pattern is:

```go
if err := router.Handle("/api/v1/things", handler.New(
	provider.authZ.AdminAccess(provider.thingHandler.CreateThing),
	handler.OpenAPIDef{
		ID: "CreateThing",
		Tags: []string{"things"},
		Summary: "Create thing",
		Description: "This endpoint creates a thing",
		Request: new(types.PostableThing),
		RequestContentType: "application/json",
		Response: new(types.GettableThing),
		ResponseContentType: "application/json",
		SuccessStatusCode: http.StatusCreated,
		ErrorStatusCodes: []int{http.StatusBadRequest, http.StatusConflict},
		Deprecated: false,
		SecuritySchemes: newSecuritySchemes(types.RoleAdmin),
	},
)).Methods(http.MethodPost).GetError(); err != nil {
	return err
}
```

### 4. Update the OpenAPI spec

Run the following command to update the OpenAPI spec:

```bash
go run cmd/enterprise/*.go generate openapi
```

This will update the OpenAPI spec in `docs/api/openapi.yml` to reflect the new endpoint.

## How does OpenAPI integration work?

The `handler.New` function ties the HTTP handler to OpenAPI metadata via `OpenAPIDef`. This drives the generated OpenAPI document.

- **ID**: A unique identifier for the operation (used as the `operationId`).
- **Tags**: Logical grouping for the operation (for example, `"users"`, `"sessions"`, `"orgs"`).
- **Summary / Description**: Human-friendly documentation.
- **Request / RequestContentType**:
  - `Request` is a Go type that describes the request body or form.
  - `RequestContentType` is usually `"application/json"` or `"application/x-www-form-urlencoded"` (for callbacks like SAML).
- **Response / ResponseContentType**:
  - `Response` is the Go type for the successful response payload.
  - `ResponseContentType` is usually `"application/json"`; use `""` for responses without a body.
- **SuccessStatusCode**: The HTTP status for successful responses (for example, `http.StatusOK`, `http.StatusCreated`, `http.StatusNoContent`).
- **ErrorStatusCodes**: Additional error status codes beyond the standard ones automatically added by `handler.New`.
- **SecuritySchemes**: Auth mechanisms and scopes required by the operation.

The generic handler:

- Automatically appends `401`, `403`, and `500` to `ErrorStatusCodes` when appropriate.
- Registers request and response schemas with the OpenAPI reflector so they appear in `docs/api/openapi.yml`.

See existing examples in:

- `addUserRoutes` (for typical JSON request/response)
- `addSessionRoutes` (for form-encoded and redirect flows)

## What should I remember?

- **Keep handlers thin**: focus on HTTP concerns and delegate logic to modules/services.
- **Always register routes through `signozapiserver`** using `handler.New` and a complete `OpenAPIDef`.
- **Choose accurate request/response types** from the `types` packages so OpenAPI schemas are correct.
@@ -9,7 +9,7 @@ SigNoz uses integration tests to verify that different components work together
Before running integration tests, ensure you have the following installed:

- Python 3.13+
- Poetry (for dependency management)
- [uv](https://docs.astral.sh/uv/getting-started/installation/)
- Docker (for containerized services)

### Initial Setup
@@ -19,17 +19,19 @@ Before running integration tests, ensure you have the following installed:
cd tests/integration
```

2. Install dependencies using Poetry:
2. Install dependencies using uv:
```bash
poetry install --no-root
uv sync
```
> **_NOTE:_** the build backend could throw an error while installing `psycopg2`; please see https://www.psycopg.org/docs/install.html#build-prerequisites

### Starting the Test Environment

To spin up all the containers necessary for writing integration tests and keep them running:

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/bootstrap/setup.py::test_setup
uv run pytest --basetemp=./tmp/ -vv --reuse src/bootstrap/setup.py::test_setup
```

This command will:
@@ -42,7 +44,7 @@ This command will:
When you're done writing integration tests, clean up the environment:

```bash
poetry run pytest --basetemp=./tmp/ -vv --teardown -s src/bootstrap/setup.py::test_teardown
uv run pytest --basetemp=./tmp/ -vv --teardown -s src/bootstrap/setup.py::test_teardown
```

This will destroy the running integration test setup and clean up resources.
@@ -72,20 +74,20 @@ Python and pytest form the foundation of the integration testing framework. Test
│   ├── sqlite.py
│   ├── types.py
│   └── zookeeper.py
├── poetry.lock
├── uv.lock
├── pyproject.toml
└── src
    └── bootstrap
        ├── __init__.py
        ├── a_database.py
        ├── b_register.py
        └── c_license.py
        ├── 01_database.py
        ├── 02_register.py
        └── 03_license.py
```

Each test suite follows some important principles:

1. **Organization**: Test suites live under `src/` in self-contained packages. Fixtures (a pytest concept) live inside `fixtures/`.
2. **Execution Order**: Files are prefixed with `a_`, `b_`, `c_` to ensure sequential execution.
2. **Execution Order**: Files are prefixed with two-digit numbers (`01_`, `02_`, `03_`) to ensure sequential execution.
3. **Time Constraints**: Each suite should complete in under 10 minutes (setup takes ~4 mins).

### Test Suite Design
@@ -107,7 +109,7 @@ Other test suites can be **pipelines, auth, querier.**

## How to write an integration test?

Now start writing an integration test. Create a new file `src/bootstrap/e_version.py` and paste the following:
Now start writing an integration test. Create a new file `src/bootstrap/05_version.py` and paste the following:

```python
import requests
@@ -125,7 +127,7 @@ def test_version(signoz: types.SigNoz) -> None:
We have written a simple test which calls the `version` endpoint of the container from step 1. **To run just this function, run the following command:**

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/bootstrap/e_version.py::test_version
uv run pytest --basetemp=./tmp/ -vv --reuse src/bootstrap/05_version.py::test_version
```

> Note: The `--reuse` flag is used to reuse the environment if it is already running. Always use this flag when writing and running integration tests. If you don't use this flag, the environment will be destroyed and recreated every time you run the test.
@@ -153,7 +155,7 @@ def test_user_registration(signoz: types.SigNoz) -> None:
        },
        timeout=2,
    )

    assert response.status_code == HTTPStatus.OK
    assert response.json()["setupCompleted"] is True
```
@@ -163,27 +165,27 @@ def test_user_registration(signoz: types.SigNoz) -> None:
### Running All Tests

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/
uv run pytest --basetemp=./tmp/ -vv --reuse src/
```

### Running Specific Test Categories

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/<suite>
uv run pytest --basetemp=./tmp/ -vv --reuse src/<suite>

# Run querier tests
poetry run pytest --basetemp=./tmp/ -vv --reuse src/querier/
uv run pytest --basetemp=./tmp/ -vv --reuse src/querier/
# Run auth tests
poetry run pytest --basetemp=./tmp/ -vv --reuse src/auth/
uv run pytest --basetemp=./tmp/ -vv --reuse src/auth/
```

### Running Individual Tests

```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse src/<suite>/<file>.py::test_name
uv run pytest --basetemp=./tmp/ -vv --reuse src/<suite>/<file>.py::test_name

# Run test_register in file a_register.py in auth suite
poetry run pytest --basetemp=./tmp/ -vv --reuse src/auth/a_register.py::test_register
# Run test_register in file 01_register.py in passwordauthn suite
uv run pytest --basetemp=./tmp/ -vv --reuse src/passwordauthn/01_register.py::test_register
```

## How to configure different options for integration tests?
@@ -197,7 +199,7 @@ Tests can be configured using pytest options:

Example:
```bash
poetry run pytest --basetemp=./tmp/ -vv --reuse --sqlstore-provider=postgres --postgres-version=14 src/auth/
uv run pytest --basetemp=./tmp/ -vv --reuse --sqlstore-provider=postgres --postgres-version=14 src/auth/
```

@@ -205,7 +207,7 @@ poetry run pytest --basetemp=./tmp/ -vv --reuse --sqlstore-provider=postgres --p

- **Always use the `--reuse` flag** when setting up the environment to keep containers running
- **Use the `--teardown` flag** when cleaning up to avoid resource leaks
- **Follow the naming convention** with alphabetical prefixes for test execution order
- **Follow the naming convention** with two-digit numeric prefixes (`01_`, `02_`) for test execution order
- **Use proper timeouts** in HTTP requests to avoid hanging tests
- **Clean up test data** between tests to avoid interference
- **Use descriptive test names** that clearly indicate what is being tested
301
docs/contributing/onboarding.md
Normal file
@@ -0,0 +1,301 @@
|
||||
# Onboarding Configuration Guide
|
||||
|
||||
This guide explains how to add new data sources to the SigNoz onboarding flow. The onboarding configuration controls the "New Source" / "Get Started" experience in SigNoz Cloud.
|
||||
|
||||
## Configuration File Location
|
||||
|
||||
The configuration is located at:
|
||||
|
||||
```
|
||||
frontend/src/container/OnboardingV2Container/onboarding-configs/onboarding-config-with-links.json
|
||||
```
|
||||
|
||||
## JSON Structure Overview
|
||||
|
||||
The configuration file is a JSON array containing data source objects. Each object represents a selectable option in the onboarding flow.
|
||||
|
||||
## Data Source Object Keys
|
||||
|
||||
### Required Keys
|
||||
|
||||
| Key | Type | Description |
|
||||
|-----|------|-------------|
|
||||
| `dataSource` | `string` | Unique identifier for the data source (kebab-case, e.g., `"aws-ec2"`) |
|
||||
| `label` | `string` | Display name shown to users (e.g., `"AWS EC2"`) |
|
||||
| `tags` | `string[]` | Array of category tags for grouping (e.g., `["AWS"]`, `["database"]`) |
|
||||
| `module` | `string` | Destination module after onboarding completion |
|
||||
| `imgUrl` | `string` | Path to the logo/icon (e.g., `"/Logos/ec2.svg"`) |
|
||||
|
||||
### Optional Keys
|
||||
|
||||
| Key | Type | Description |
|
||||
|-----|------|-------------|
|
||||
| `link` | `string` | Docs link to redirect to (e.g., `"/docs/aws-monitoring/ec2/"`) |
|
||||
| `relatedSearchKeywords` | `string[]` | Array of keywords for search functionality |
|
||||
| `question` | `object` | Nested question object for multi-step flows |
|
||||
| `internalRedirect` | `boolean` | When `true`, navigates within the app instead of showing docs |
|
||||
|
||||
## Module Values
|
||||
|
||||
The `module` key determines where users are redirected after completing onboarding:
|
||||
|
||||
| Value | Destination |
|
||||
|-------|-------------|
|
||||
| `apm` | APM / Traces |
|
||||
| `logs` | Logs Explorer |
|
||||
| `metrics` | Metrics Explorer |
|
||||
| `dashboards` | Dashboards |
|
||||
| `infra-monitoring-hosts` | Infrastructure Monitoring - Hosts |
|
||||
| `infra-monitoring-k8s` | Infrastructure Monitoring - Kubernetes |
|
||||
| `messaging-queues-kafka` | Messaging Queues - Kafka |
|
||||
| `messaging-queues-celery` | Messaging Queues - Celery |
|
||||
| `integrations` | Integrations page |
|
||||
| `home` | Home page |
|
||||
| `api-monitoring` | API Monitoring |
|
||||
|
||||
## Question Object Structure
|
||||
|
||||
The `question` object enables multi-step selection flows:
|
||||
|
||||
```json
|
||||
{
|
||||
"question": {
|
||||
"desc": "What would you like to monitor?",
|
||||
"type": "select",
|
||||
"helpText": "Choose the telemetry type you want to collect.",
|
||||
"helpLink": "/docs/azure-monitoring/overview/",
|
||||
"helpLinkText": "Read the guide →",
|
||||
"options": [
|
||||
{
|
||||
"key": "logging",
|
||||
"label": "Logs",
|
||||
"imgUrl": "/Logos/azure-vm.svg",
|
||||
"link": "/docs/azure-monitoring/app-service/logging/"
|
||||
},
|
||||
{
|
||||
"key": "metrics",
|
||||
"label": "Metrics",
|
||||
"imgUrl": "/Logos/azure-vm.svg",
|
||||
"link": "/docs/azure-monitoring/app-service/metrics/"
|
||||
},
|
||||
{
|
||||
"key": "tracing",
|
||||
"label": "Traces",
|
||||
"imgUrl": "/Logos/azure-vm.svg",
|
||||
"link": "/docs/azure-monitoring/app-service/tracing/"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Question Keys
|
||||
|
||||
| Key | Type | Description |
|
||||
|-----|------|-------------|
|
||||
| `desc` | `string` | Question text displayed to the user |
|
||||
| `type` | `string` | Currently only `"select"` is supported |
|
||||
| `helpText` | `string` | (Optional) Additional help text below the question |
|
||||
| `helpLink` | `string` | (Optional) Docs link for the help section |
|
||||
| `helpLinkText` | `string` | (Optional) Text for the help link (default: "Learn more →") |
|
||||
| `options` | `array` | Array of option objects |
|
||||
|
||||
## Option Object Structure
|
||||
|
||||
Options can be simple (direct link) or nested (with another question):
|
||||
|
||||
### Simple Option (Direct Link)
|
||||
|
||||
```json
|
||||
{
|
||||
"key": "aws-ec2-logs",
|
||||
"label": "Logs",
|
||||
"imgUrl": "/Logos/ec2.svg",
|
||||
"link": "/docs/userguide/collect_logs_from_file/"
|
||||
}
|
||||
```
|
||||
|
||||
### Option with Internal Redirect
|
||||
|
||||
```json
|
||||
{
|
||||
"key": "aws-ec2-metrics-one-click",
|
||||
"label": "One Click AWS",
|
||||
"imgUrl": "/Logos/ec2.svg",
|
||||
"link": "/integrations?integration=aws-integration&service=ec2",
|
||||
"internalRedirect": true
|
||||
}
|
||||
```
|
||||
|
||||
> **Important**: Set `internalRedirect: true` only for internal app routes (like `/integrations?...`). Docs links should NOT have this flag.
|
||||
|
||||
### Nested Option (Multi-step Flow)
|
||||
|
||||
```json
|
||||
{
|
||||
"key": "aws-ec2-metrics",
|
||||
"label": "Metrics",
|
||||
"imgUrl": "/Logos/ec2.svg",
|
||||
"question": {
|
||||
"desc": "How would you like to set up monitoring?",
|
||||
"helpText": "Choose your setup method.",
|
||||
"options": [...]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
### Simple Data Source (Direct Link)
|
||||
|
||||
```json
|
||||
{
|
||||
"dataSource": "aws-elb",
|
||||
"label": "AWS ELB",
|
||||
"tags": ["AWS"],
|
||||
"module": "logs",
|
||||
"relatedSearchKeywords": [
|
||||
"aws",
|
||||
"aws elb",
|
||||
"elb logs",
|
||||
"elastic load balancer"
|
||||
],
|
||||
"imgUrl": "/Logos/elb.svg",
|
||||
"link": "/docs/aws-monitoring/elb/"
|
||||
}
|
||||
```
|
||||
|
||||
### Data Source with Single Question Level
|
||||
|
||||
```json
|
||||
{
|
||||
"dataSource": "app-service",
|
||||
"label": "App Service",
|
||||
"imgUrl": "/Logos/azure-vm.svg",
|
||||
"tags": ["Azure"],
|
||||
"module": "apm",
|
||||
"relatedSearchKeywords": ["azure", "app service"],
|
||||
"question": {
|
||||
"desc": "What telemetry data do you want to visualise?",
|
||||
"type": "select",
|
||||
"options": [
|
||||
{
|
||||
"key": "logging",
|
||||
"label": "Logs",
|
||||
"imgUrl": "/Logos/azure-vm.svg",
|
||||
"link": "/docs/azure-monitoring/app-service/logging/"
|
||||
},
|
||||
{
|
||||
"key": "metrics",
|
||||
"label": "Metrics",
|
||||
"imgUrl": "/Logos/azure-vm.svg",
|
||||
"link": "/docs/azure-monitoring/app-service/metrics/"
|
||||
},
|
||||
{
|
||||
"key": "tracing",
|
||||
"label": "Traces",
|
||||
"imgUrl": "/Logos/azure-vm.svg",
|
||||
"link": "/docs/azure-monitoring/app-service/tracing/"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Data Source with Nested Questions (2-3 Levels)
|
||||
|
||||
```json
|
||||
{
|
||||
"dataSource": "aws-ec2",
|
||||
"label": "AWS EC2",
|
||||
"tags": ["AWS"],
|
||||
"module": "logs",
|
||||
"relatedSearchKeywords": ["aws", "aws ec2", "ec2 logs", "ec2 metrics"],
|
||||
"imgUrl": "/Logos/ec2.svg",
|
||||
"question": {
|
||||
"desc": "What would you like to monitor for AWS EC2?",
|
||||
"type": "select",
|
||||
"helpText": "Choose the type of telemetry data you want to collect.",
|
||||
"options": [
|
||||
{
|
||||
"key": "aws-ec2-logs",
|
||||
"label": "Logs",
|
||||
"imgUrl": "/Logos/ec2.svg",
|
||||
"link": "/docs/userguide/collect_logs_from_file/"
|
||||
},
|
||||
{
|
||||
"key": "aws-ec2-metrics",
|
||||
"label": "Metrics",
|
||||
"imgUrl": "/Logos/ec2.svg",
|
||||
"question": {
|
||||
"desc": "How would you like to set up EC2 Metrics monitoring?",
|
||||
"helpText": "One Click uses AWS CloudWatch integration. Manual setup uses OpenTelemetry.",
|
||||
"helpLink": "/docs/aws-monitoring/one-click-vs-manual/",
|
||||
"helpLinkText": "Read the comparison guide →",
|
||||
"options": [
|
||||
{
|
||||
"key": "aws-ec2-metrics-one-click",
|
||||
"label": "One Click AWS",
|
||||
"imgUrl": "/Logos/ec2.svg",
|
||||
"link": "/integrations?integration=aws-integration&service=ec2",
|
||||
"internalRedirect": true
|
||||
},
|
||||
{
|
||||
"key": "aws-ec2-metrics-manual",
|
||||
"label": "Manual Setup",
|
||||
"imgUrl": "/Logos/ec2.svg",
|
||||
"link": "/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Best Practices

### 1. Tags

- Use existing tags when possible: `AWS`, `Azure`, `GCP`, `database`, `logs`, `apm/traces`, `infrastructure monitoring`, `LLM Monitoring`
- Tags are used for grouping in the sidebar
- Every data source must have at least one tag

### 2. Search Keywords

- Include variations of the name (e.g., `"aws ec2"`, `"ec2"`, `"ec2 logs"`)
- Include common misspellings and alternative names
- Keep keywords lowercase and alphabetically sorted

### 3. Logos

- Place logo files in `public/Logos/`
- Use SVG format
- Reference them as `"/Logos/your-logo.svg"`

### 4. Links

- Docs links should start with `/docs/` (they are prefixed with `DOCS_BASE_URL`)
- Internal app links should start with `/integrations`, `/services`, etc.
- Use `internalRedirect: true` only for internal app routes

### 5. Keys

- Use kebab-case for `dataSource` and option `key` values
- Make keys descriptive and unique
- Follow the pattern `{service}-{subtype}-{action}` (e.g., `aws-ec2-metrics-one-click`)

## Adding a New Data Source

1. Add your data source object to the JSON array
2. Ensure the logo exists in `public/Logos/`
3. Test the flow locally with `yarn dev`
4. Validate on the [onboarding page](http://localhost:3301/get-started-with-signoz-cloud) of your local build that:
   - the data source appears in the list
   - search keywords surface it correctly
   - all links redirect to the correct pages
   - questions display the correct help text and links
   - tags group it correctly in the UI sidebar
   - clicking Configure redirects to the correct page

---

**File: `docs/implementation/EXTERNAL_API_MONITORING.md`** (new file, +292 lines)

# External API Monitoring - Developer Guide

## Overview

External API Monitoring tracks outbound HTTP calls from your services to external APIs. It groups spans by domain (e.g., `api.example.com`) and displays metrics such as endpoint count, request rate, error rate, latency, and last-seen time.

**Key Requirement**: Spans must have `kind_string = 'Client'`, at least one of `http.url`/`url.full`, and at least one of `net.peer.name`/`server.address`.
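
For instrumentation authors, the sketch below shows one way to emit a span that satisfies these requirements with the OpenTelemetry Go SDK. The tracer name, URL, and domain are illustrative assumptions, not code from this repository.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

// callExternalAPI emits a CLIENT span carrying the attributes that
// External API Monitoring needs for domain grouping and filtering.
func callExternalAPI(ctx context.Context) {
	tracer := otel.Tracer("example/http-client") // hypothetical tracer name
	ctx, span := tracer.Start(ctx, "HTTP GET",
		trace.WithSpanKind(trace.SpanKindClient)) // stored as kind_string = 'Client'
	defer span.End()

	span.SetAttributes(
		attribute.String("http.url", "https://api.example.com/v1/users"), // or url.full
		attribute.String("net.peer.name", "api.example.com"),             // or server.address
	)
	// ... perform the HTTP request here
}
```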

---

## Architecture Flow

```
Frontend (DomainList)
  → useListOverview hook
  → POST /api/v1/third-party-apis/overview/list
  → getDomainList handler
  → BuildDomainList (7 queries)
  → QueryRange (ClickHouse)
  → Post-processing (merge semconv, filter IPs)
  → formatDataForTable
  → UI Display
```

---

## Key APIs

### 1. Domain List API

**Endpoint**: `POST /api/v1/third-party-apis/overview/list`

**Request**:
```json
{
  "start": 1699123456789,  // Unix timestamp (ms)
  "end": 1699127056789,
  "show_ip": false,        // Filter IP addresses
  "filter": {
    "expression": "kind_string = 'Client' AND service.name = 'api'"
  }
}
```

**Response**: Table with columns:
- `net.peer.name` (domain name)
- `endpoints` (count_distinct with fallback: `http.url` or `url.full`)
- `rps` (`rate()`)
- `error_rate` (formula: `error/total_span * 100`)
- `p99` (`p99(duration_nano)`)
- `lastseen` (`max(timestamp)`)

**Handler**: `pkg/query-service/app/http_handler.go::getDomainList()`

---

### 2. Domain Info API

**Endpoint**: `POST /api/v1/third-party-apis/overview/domain`

**Request**: Same as Domain List, but includes a `domain` field

**Response**: Endpoint-level metrics for a specific domain

**Handler**: `pkg/query-service/app/http_handler.go::getDomainInfo()`

---

## Query Building

### Location
`pkg/modules/thirdpartyapi/translator.go`

### BuildDomainList() - Creates 7 Sub-queries

1. **endpoints**: `count_distinct(if(http.url exists, http.url, url.full))` - unique endpoint count (handles both semconv attributes)
2. **lastseen**: `max(timestamp)` - last access time
3. **rps**: `rate()` - requests per second
4. **error**: `count() WHERE has_error = true` - error count
5. **total_span**: `count()` - total spans (the denominator for error rate)
6. **p99**: `p99(duration_nano)` - 99th percentile latency
7. **error_rate**: formula `(error/total_span)*100` - e.g., 5 errors over 200 spans gives 2.5%

### Base Filter
```go
"(http.url EXISTS OR url.full EXISTS) AND kind_string = 'Client'"
```

### GroupBy
- Groups by `server.address` + `net.peer.name` (dual semconv support)

---

## Key Files

### Frontend

| File | Purpose |
|------|---------|
| `frontend/src/container/ApiMonitoring/Explorer/Domains/DomainList.tsx` | Main list view component |
| `frontend/src/container/ApiMonitoring/Explorer/Domains/DomainDetails/DomainDetails.tsx` | Domain details drawer |
| `frontend/src/hooks/thirdPartyApis/useListOverview.ts` | Data fetching hook |
| `frontend/src/api/thirdPartyApis/listOverview.ts` | API client |
| `frontend/src/container/ApiMonitoring/utils.tsx` | Utilities (formatting, query building) |

### Backend

| File | Purpose |
|------|---------|
| `pkg/query-service/app/http_handler.go` | API handlers (`getDomainList`, `getDomainInfo`) |
| `pkg/modules/thirdpartyapi/translator.go` | Query builder & response processing |
| `pkg/types/thirdpartyapitypes/thirdpartyapi.go` | Request/response types |

---

## Data Tables

### Primary Table
- **Table**: `signoz_traces.distributed_signoz_index_v3`
- **Key Columns**:
  - `kind_string` - filter for `'Client'` spans
  - `duration_nano` - for latency calculations
  - `has_error` - for error rate
  - `timestamp` - for last seen
  - `attributes_string` - map containing `http.url`, `net.peer.name`, etc.
  - `resources_string` - map containing `server.address`, `service.name`, etc.

### Attribute Access
```sql
-- Check existence
mapContains(attributes_string, 'http.url') = 1

-- Get value
attributes_string['http.url']

-- Materialized column (if it exists)
attribute_string_http$$url
```

---

## Post-Processing

### 1. MergeSemconvColumns()
- Merges `server.address` and `net.peer.name` into a single column
- Location: `pkg/modules/thirdpartyapi/translator.go:117`

### 2. FilterIntermediateColumns()
- Removes intermediate formula columns from the response
- Location: `pkg/modules/thirdpartyapi/translator.go:70`

### 3. FilterResponse()
- Filters out IP addresses when `show_ip = false`
- Uses `net.ParseIP()` to detect IPs
- Location: `pkg/modules/thirdpartyapi/translator.go:214`
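
A minimal sketch of the IP-detection idea using the standard library's `net.ParseIP`, as described above. The function and its plain string-slice shape are illustrative only; the real `FilterResponse()` operates on the query-range response structure.

```go
package thirdpartyapi

import "net"

// filterIPDomains drops entries whose domain parses as an IP literal.
// Illustrative sketch, not the actual implementation in translator.go.
func filterIPDomains(domains []string, showIP bool) []string {
	if showIP {
		return domains
	}
	kept := make([]string, 0, len(domains))
	for _, d := range domains {
		if net.ParseIP(d) != nil {
			continue // d is an IPv4/IPv6 literal, skip it
		}
		kept = append(kept, d)
	}
	return kept
}
```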

---

## Required Attributes

### For Domain Grouping
- `net.peer.name` OR `server.address` (required)

### For Filtering
- `http.url` OR `url.full` (required)
- `kind_string = 'Client'` (required)

### Not Required
- `http.target` - not used in external API monitoring

### Known Bug
`buildEndpointsQuery()` aggregates with `count_distinct(http.url)`, but the base filter also accepts `url.full`. Spans that carry only `url.full` pass the filter yet never contribute to the endpoint count.

**Fix Needed**: Update the aggregation to handle both attributes:
```go
// Current (buggy)
{Expression: "count_distinct(http.url)"}

// Should be
{Expression: "count_distinct(coalesce(http.url, url.full))"}
```

---

## Frontend Data Flow

### 1. Domain List View
```
DomainList component
  → useListOverview({ start, end, show_ip, filter })
  → listOverview API call
  → formatDataForTable(response)
  → Table display
```

### 2. Domain Details View
```
User clicks domain
  → DomainDetails drawer opens
  → Multiple queries:
      - DomainMetrics (overview cards)
      - AllEndpoints (endpoint table)
      - TopErrors (error table)
      - EndPointDetails (when an endpoint is selected)
```

### 3. Data Formatting
- `formatDataForTable()` converts the API response to table format
- Handles `n/a` values, converts nanoseconds to milliseconds
- Maps column names to display fields

---

## Query Examples

### Domain List Query
```sql
SELECT
    multiIf(
        mapContains(attributes_string, 'server.address'),
        attributes_string['server.address'],
        mapContains(attributes_string, 'net.peer.name'),
        attributes_string['net.peer.name'],
        NULL
    ) AS domain,
    count_distinct(attributes_string['http.url']) AS endpoints,
    rate() AS rps,
    p99(duration_nano) AS p99,
    max(timestamp) AS lastseen
FROM signoz_traces.distributed_signoz_index_v3
WHERE
    (mapContains(attributes_string, 'http.url') = 1
     OR mapContains(attributes_string, 'url.full') = 1)
    AND kind_string = 'Client'
    AND timestamp >= ? AND timestamp < ?
GROUP BY domain
```

---

## Testing

### Key Test Files
- `frontend/src/container/ApiMonitoring/__tests__/AllEndpointsWidgetV5Migration.test.tsx`
- `frontend/src/container/ApiMonitoring/__tests__/EndpointDropdownV5Migration.test.tsx`
- `pkg/modules/thirdpartyapi/translator_test.go`

### Test Scenarios
1. Domain filtering with both semconv attributes
2. URL handling (`http.url` vs `url.full`)
3. IP address filtering
4. Error rate calculation
5. Empty state handling

---

## Common Issues

### Empty State
**Symptom**: No domains shown despite data existing

**Causes**:
1. Missing `net.peer.name` or `server.address`
2. Missing `http.url` or `url.full`
3. Spans not marked with `kind_string = 'Client'`
4. Known bug: only `url.full` is present, but the query uses `count_distinct(http.url)`

### Performance
- Queries use `ts_bucket_start` for time partitioning
- Resource filtering uses the separate `distributed_traces_v3_resource` table
- Materialized columns improve performance for common attributes

---

## Quick Start Checklist

- [ ] Understand the trace table schema (`signoz_index_v3`)
- [ ] Review `BuildDomainList()` in `translator.go`
- [ ] Check the `getDomainList()` handler in `http_handler.go`
- [ ] Review the frontend `DomainList.tsx` component
- [ ] Understand semconv attribute mapping (legacy vs current)
- [ ] Test with spans that have the required attributes
- [ ] Review the post-processing functions (merge, filter)

---

## References

- **Trace Schema**: `pkg/telemetrytraces/field_mapper.go`
- **Query Builder**: `pkg/telemetrytraces/statement_builder.go`
- **API Routes**: `pkg/query-service/app/http_handler.go:2157`
- **Constants**: `pkg/modules/thirdpartyapi/translator.go:14-20`

---

**File: `docs/implementation/QUERY_RANGE_API.md`** (new file, +980 lines)

# Query Range API (V5) - Developer Guide

This document provides a comprehensive guide to the Query Range API (V5), which is the primary query endpoint for traces, logs, and metrics in SigNoz. It covers architecture, request/response models, code flows, and implementation details.

## Table of Contents

1. [Overview](#overview)
2. [API Endpoint](#api-endpoint)
3. [Request/Response Models](#requestresponse-models)
4. [Query Types](#query-types)
5. [Request Types](#request-types)
6. [Code Flow](#code-flow)
7. [Key Components](#key-components)
8. [Query Execution](#query-execution)
9. [Caching](#caching)
10. [Result Processing](#result-processing)
11. [Performance Considerations](#performance-considerations)
12. [Extending the API](#extending-the-api)

---

## Overview

The Query Range API (V5) is the unified query endpoint for all telemetry signals (traces, logs, metrics) in SigNoz. It provides:

- **Unified Interface**: Single endpoint for all signal types
- **Query Builder**: Visual query builder support
- **Multiple Query Types**: Builder queries, PromQL, ClickHouse SQL, formulas, trace operators
- **Flexible Response Types**: Time series, scalar, raw data, trace-specific
- **Advanced Features**: Aggregations, filters, group by, ordering, pagination
- **Caching**: Intelligent caching for performance

### Key Technologies

- **Backend**: Go (Golang)
- **Storage**: ClickHouse (columnar database)
- **Query Language**: Custom query builder + PromQL + ClickHouse SQL
- **Protocol**: HTTP/REST API

---

## API Endpoint

### Endpoint Details

**URL**: `POST /api/v5/query_range`

**Handler**: `QuerierAPI.QueryRange` → `querier.QueryRange`

**Location**:
- Handler: `pkg/querier/querier.go:122`
- Route Registration: `pkg/query-service/app/http_handler.go:480`

**Authentication**: Requires ViewAccess permission

**Content-Type**: `application/json`

### Request Flow

```
HTTP Request (POST /api/v5/query_range)
        ↓
HTTP Handler (QuerierAPI.QueryRange)
        ↓
Querier.QueryRange (pkg/querier/querier.go)
        ↓
Query Execution (Statement Builders → ClickHouse)
        ↓
Result Processing & Merging
        ↓
HTTP Response (QueryRangeResponse)
```

---

## Request/Response Models

### Request Model

**Location**: `pkg/types/querybuildertypes/querybuildertypesv5/req.go`

```go
type QueryRangeRequest struct {
    Start          uint64                  // Start timestamp (milliseconds)
    End            uint64                  // End timestamp (milliseconds)
    RequestType    RequestType             // Response type (TimeSeries, Scalar, Raw, Trace)
    Variables      map[string]VariableItem // Template variables
    CompositeQuery CompositeQuery          // Container for queries
    NoCache        bool                    // Skip cache flag
}
```

### Composite Query

```go
type CompositeQuery struct {
    Queries []QueryEnvelope // Array of queries to execute
}
```

### Query Envelope

```go
type QueryEnvelope struct {
    Type QueryType // Query type (Builder, PromQL, ClickHouseSQL, Formula, TraceOperator)
    Spec any       // Query specification (type-specific)
}
```

### Response Model

**Location**: `pkg/types/querybuildertypes/querybuildertypesv5/req.go`

```go
type QueryRangeResponse struct {
    Type    RequestType    // Response type
    Data    QueryData      // Query results
    Meta    ExecStats      // Execution statistics
    Warning *QueryWarnData // Warnings (if any)
    QBEvent *QBEvent       // Query builder event metadata
}

type QueryData struct {
    Results []any // Array of result objects (type depends on RequestType)
}

type ExecStats struct {
    RowsScanned   uint64            // Total rows scanned
    BytesScanned  uint64            // Total bytes scanned
    DurationMS    uint64            // Query duration in milliseconds
    StepIntervals map[string]uint64 // Step intervals per query
}
```

---

## Query Types

The API supports multiple query types, each with its own specification format.

### 1. Builder Query (`QueryTypeBuilder`)

Visual query builder queries. Supports traces, logs, and metrics.

**Spec Type**: `QueryBuilderQuery[T]` where T is:
- `TraceAggregation` for traces
- `LogAggregation` for logs
- `MetricAggregation` for metrics

**Example**:
```go
QueryBuilderQuery[TraceAggregation]{
    Name:   "query_name",
    Signal: SignalTraces,
    Filter: &Filter{
        Expression: "service.name = 'api' AND duration_nano > 1000000",
    },
    Aggregations: []TraceAggregation{
        {Expression: "count()", Alias: "total"},
        {Expression: "avg(duration_nano)", Alias: "avg_duration"},
    },
    GroupBy: []GroupByKey{...},
    Order:   []OrderBy{...},
    Limit:   100,
}
```

**Key Files**:
- Traces: `pkg/telemetrytraces/statement_builder.go`
- Logs: `pkg/telemetrylogs/statement_builder.go`
- Metrics: `pkg/telemetrymetrics/statement_builder.go`

### 2. PromQL Query (`QueryTypePromQL`)

Prometheus Query Language queries for metrics.

**Spec Type**: `PromQuery`

**Example**:
```go
PromQuery{
    Query: "rate(http_requests_total[5m])",
    Step:  Step{Duration: time.Minute},
}
```

**Key Files**: `pkg/querier/promql_query.go`

### 3. ClickHouse SQL Query (`QueryTypeClickHouseSQL`)

Direct ClickHouse SQL queries.

**Spec Type**: `ClickHouseQuery`

**Example**:
```go
ClickHouseQuery{
    Query: "SELECT count() FROM signoz_traces.distributed_signoz_index_v3 WHERE ...",
}
```

**Key Files**: `pkg/querier/ch_sql_query.go`

### 4. Formula Query (`QueryTypeFormula`)

Mathematical formulas combining other queries.

**Spec Type**: `QueryBuilderFormula`

**Example**:
```go
QueryBuilderFormula{
    Expression: "A / B * 100", // A and B are query names
}
```

**Key Files**: `pkg/querier/formula_query.go`

### 5. Trace Operator Query (`QueryTypeTraceOperator`)

Set operations on trace queries (AND, OR, NOT).

**Spec Type**: `QueryBuilderTraceOperator`

**Example**:
```go
QueryBuilderTraceOperator{
    Expression: "A AND B", // A and B are query names
    Filter:     &Filter{...},
}
```

**Key Files**:
- `pkg/telemetrytraces/trace_operator_statement_builder.go`
- `pkg/querier/trace_operator_query.go`

---

## Request Types

The `RequestType` determines the format of the response data.

### 1. `RequestTypeTimeSeries`

Returns time series data for charts.

**Response Format**: `TimeSeriesData`

```go
type TimeSeriesData struct {
    QueryName    string
    Aggregations []AggregationBucket
}

type AggregationBucket struct {
    Index  int
    Series []TimeSeries
    Alias  string
    Meta   AggregationMeta
}

type TimeSeries struct {
    Labels map[string]string
    Values []TimeSeriesValue
}

type TimeSeriesValue struct {
    Timestamp int64
    Value     float64
}
```

**Use Case**: Line charts, bar charts, area charts

### 2. `RequestTypeScalar`

Returns a single scalar value.

**Response Format**: `ScalarData`

```go
type ScalarData struct {
    QueryName string
    Data      []ScalarValue
}

type ScalarValue struct {
    Timestamp int64
    Value     float64
}
```

**Use Case**: Single value displays, stat panels

### 3. `RequestTypeRaw`

Returns raw data rows.

**Response Format**: `RawData`

```go
type RawData struct {
    QueryName string
    Columns   []string
    Rows      []RawDataRow
}

type RawDataRow struct {
    Timestamp time.Time
    Data      map[string]any
}
```

**Use Case**: Tables, logs viewer, trace lists

### 4. `RequestTypeTrace`

Returns a trace-specific data structure.

**Response Format**: Trace-specific format (see the traces documentation)

**Use Case**: Trace-specific visualizations

---

## Code Flow

### Complete Request Flow

```
1. HTTP Request
   POST /api/v5/query_range
   Body: QueryRangeRequest JSON
   ↓
2. HTTP Handler
   QuerierAPI.QueryRange (pkg/querier/querier.go)
   - Validates request
   - Extracts organization ID from auth context
   ↓
3. Querier.QueryRange (pkg/querier/querier.go:122)
   - Validates QueryRangeRequest
   - Processes each query in CompositeQuery.Queries
   - Identifies dependencies (e.g., trace operators, formulas)
   - Calculates step intervals
   - Fetches metric temporality if needed
   ↓
4. Query Creation
   For each QueryEnvelope:

   a. Builder Query:
      - newBuilderQuery() creates builderQuery instance
      - Selects appropriate statement builder based on signal:
        * Traces → traceStmtBuilder
        * Logs → logStmtBuilder
        * Metrics → metricStmtBuilder or meterStmtBuilder
      ↓
   b. PromQL Query:
      - newPromqlQuery() creates promqlQuery instance
      - Uses Prometheus engine
      ↓
   c. ClickHouse SQL Query:
      - newchSQLQuery() creates chSQLQuery instance
      - Direct SQL execution
      ↓
   d. Formula Query:
      - newFormulaQuery() creates formulaQuery instance
      - References other queries by name
      ↓
   e. Trace Operator Query:
      - newTraceOperatorQuery() creates traceOperatorQuery instance
      - Uses traceOperatorStmtBuilder
   ↓
5. Statement Building (for Builder queries)
   StatementBuilder.Build()
   - Resolves field keys from metadata store
   - Builds SQL based on request type:
     * RequestTypeRaw → buildListQuery()
     * RequestTypeTimeSeries → buildTimeSeriesQuery()
     * RequestTypeScalar → buildScalarQuery()
     * RequestTypeTrace → buildTraceQuery()
   - Returns SQL statement with arguments
   ↓
6. Query Execution
   Query.Execute()
   - Executes SQL/query against ClickHouse or Prometheus
   - Processes results into response format
   - Returns Result with data and statistics
   ↓
7. Caching (if applicable)
   - Checks bucket cache for time series queries
   - Executes queries for missing time ranges
   - Merges cached and fresh results
   ↓
8. Result Processing
   querier.run()
   - Executes all queries (with dependency resolution)
   - Collects results and warnings
   - Merges results from multiple queries
   ↓
9. Post-Processing
   postProcessResults()
   - Applies formulas if present
   - Handles variable substitution
   - Formats results for response
   ↓
10. HTTP Response
    - Returns QueryRangeResponse with results
    - Includes execution statistics
    - Includes warnings if any
```

### Key Decision Points

1. **Query Type Selection**: Based on `QueryEnvelope.Type`
2. **Signal Selection**: For builder queries, based on the `Signal` field
3. **Request Type Handling**: Different SQL generation for different request types
4. **Caching Strategy**: Only for time series queries with valid fingerprints
5. **Dependency Resolution**: Trace operators and formulas resolve dependencies first

---

## Key Components

### 1. Querier

**Location**: `pkg/querier/querier.go`

**Purpose**: Orchestrates query execution, caching, and result merging

**Key Methods**:
- `QueryRange()`: Main entry point for query execution
- `run()`: Executes queries and merges results
- `executeWithCache()`: Handles caching logic
- `mergeResults()`: Merges cached and fresh results
- `postProcessResults()`: Applies formulas and variable substitution

**Key Features**:
- Query orchestration across multiple query types
- Intelligent caching with a bucket-based strategy
- Result merging from multiple queries
- Formula evaluation
- Time range optimization
- Step interval calculation and validation

### 2. Statement Builder Interface

**Location**: `pkg/types/querybuildertypes/querybuildertypesv5/`

**Purpose**: Converts query builder specifications into executable queries

**Interface**:
```go
type StatementBuilder[T any] interface {
    Build(
        ctx context.Context,
        start uint64,
        end uint64,
        requestType RequestType,
        query QueryBuilderQuery[T],
        variables map[string]VariableItem,
    ) (*Statement, error)
}
```

**Implementations**:
- `traceQueryStatementBuilder` - Traces (`pkg/telemetrytraces/statement_builder.go`)
- `logQueryStatementBuilder` - Logs (`pkg/telemetrylogs/statement_builder.go`)
- `metricQueryStatementBuilder` - Metrics (`pkg/telemetrymetrics/statement_builder.go`)

**Key Features**:
- Field resolution via the metadata store
- SQL generation for different request types
- Filter, aggregation, group by, and ordering support
- Time range optimization

### 3. Query Interface

**Location**: `pkg/types/querybuildertypes/querybuildertypesv5/`

**Purpose**: Represents an executable query

**Interface**:
```go
type Query interface {
    Execute(ctx context.Context) (*Result, error)
    Fingerprint() string      // For caching
    Window() (uint64, uint64) // Time range
}
```

**Implementations**:
- `builderQuery[T]` - Builder queries (`pkg/querier/builder_query.go`)
- `promqlQuery` - PromQL queries (`pkg/querier/promql_query.go`)
- `chSQLQuery` - ClickHouse SQL queries (`pkg/querier/ch_sql_query.go`)
- `formulaQuery` - Formula queries (`pkg/querier/formula_query.go`)
- `traceOperatorQuery` - Trace operator queries (`pkg/querier/trace_operator_query.go`)

### 4. Telemetry Store

**Location**: `pkg/telemetrystore/`

**Purpose**: Abstraction layer for ClickHouse database access

**Key Methods**:
- `Query()`: Execute a SQL query
- `QueryRow()`: Execute a query returning a single row
- `Select()`: Execute a query returning multiple rows

**Implementation**: `clickhouseTelemetryStore` (`pkg/telemetrystore/clickhousetelemetrystore/`)

### 5. Metadata Store

**Location**: `pkg/types/telemetrytypes/`

**Purpose**: Provides metadata about available fields, keys, and attributes

**Key Methods**:
- `GetKeysMulti()`: Get field keys for multiple selectors
- `FetchTemporalityMulti()`: Get metric temporality information

**Implementation**: `telemetryMetadataStore` (`pkg/telemetrymetadata/`)

### 6. Bucket Cache

**Location**: `pkg/querier/`

**Purpose**: Caches query results by time buckets for performance

**Key Methods**:
- `GetMissRanges()`: Get time ranges not in the cache
- `Put()`: Store a query result in the cache

**Features**:
- Bucket-based caching (aligned to step intervals)
- Automatic cache invalidation
- Parallel query execution for missing ranges

---

## Query Execution

### Builder Query Execution

**Location**: `pkg/querier/builder_query.go`

**Process**:
1. Statement builder generates SQL
2. SQL executed against ClickHouse via the TelemetryStore
3. Results processed based on RequestType:
   - TimeSeries: grouped by time buckets and labels
   - Scalar: single value extraction
   - Raw: row-by-row processing
4. Statistics collected (rows scanned, bytes scanned, duration)

### PromQL Query Execution

**Location**: `pkg/querier/promql_query.go`

**Process**:
1. Query parsed by the Prometheus engine
2. Executed against Prometheus-compatible data
3. Results converted to QueryRangeResponse format

### ClickHouse SQL Query Execution

**Location**: `pkg/querier/ch_sql_query.go`

**Process**:
1. SQL query executed directly
2. Results processed based on RequestType
3. Variable substitution applied

### Formula Query Execution

**Location**: `pkg/querier/formula_query.go`

**Process**:
1. Referenced queries executed first
2. Formula expression evaluated using govaluate
3. Results computed from the query results

### Trace Operator Query Execution

**Location**: `pkg/querier/trace_operator_query.go`

**Process**:
1. Expression parsed to find dependencies
2. Referenced queries executed
3. Set operations applied (INTERSECT, UNION, EXCEPT)
4. Results combined

---

## Caching

### Caching Strategy

**Location**: `pkg/querier/querier.go:642`

**When Caching Applies**:
- Time series queries only
- Queries with valid fingerprints
- `NoCache` flag not set

**How It Works**:
1. A query fingerprint is generated from the query structure and filters
2. The cache is checked for existing results
3. Missing time ranges are identified
4. Queries execute only for the missing ranges (in parallel)
5. Fresh results are merged with cached results
6. The merged result is stored back in the cache
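
The core of step 3 is interval arithmetic over the requested window and the already-cached buckets. The sketch below is a simplified, self-contained illustration of that idea, not the actual bucket cache code; `timeRange` and `missRanges` are hypothetical names.

```go
package main

import "fmt"

// timeRange is a half-open interval [Start, End) in arbitrary time
// units (milliseconds in the real API).
type timeRange struct{ Start, End uint64 }

// missRanges returns the sub-ranges of want not covered by the sorted,
// non-overlapping cached ranges - the "query only missing time ranges"
// step described above.
func missRanges(want timeRange, cached []timeRange) []timeRange {
	var missing []timeRange
	cursor := want.Start
	for _, c := range cached {
		if c.End <= cursor || c.Start >= want.End {
			continue // no overlap with the requested window
		}
		if c.Start > cursor {
			missing = append(missing, timeRange{cursor, c.Start})
		}
		if c.End > cursor {
			cursor = c.End
		}
	}
	if cursor < want.End {
		missing = append(missing, timeRange{cursor, want.End})
	}
	return missing
}

func main() {
	// Request a one-hour window with the middle 15 minutes cached:
	// two gaps remain to be queried.
	fmt.Println(missRanges(timeRange{0, 3600}, []timeRange{{900, 1800}}))
	// → [{0 900} {1800 3600}]
}
```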

### Cache Key Generation

**Location**: `pkg/querier/builder_query.go:52`

The fingerprint includes:
- Signal type
- Source type
- Step interval
- Aggregations
- Filters
- Group by fields
- Time range (used in the cache key, not the fingerprint)
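
As a rough illustration of how those fields can be folded into one stable string (the encoding and function name here are assumptions; the real `Fingerprint()` lives in `pkg/querier/builder_query.go`):

```go
package main

import (
	"fmt"
	"strings"
)

// fingerprint joins the query-shape fields listed above into a stable
// cache key component. Sketch only, not the real implementation.
func fingerprint(signal, source, step string, aggs, filters, groupBy []string) string {
	parts := []string{
		"signal=" + signal,
		"source=" + source,
		"step=" + step,
		"aggs=" + strings.Join(aggs, ","),
		"filter=" + strings.Join(filters, " AND "),
		"groupby=" + strings.Join(groupBy, ","),
	}
	return strings.Join(parts, "&")
}

func main() {
	fmt.Println(fingerprint(
		"traces", "spans", "60s",
		[]string{"count()"},
		[]string{"service.name = 'api'"},
		[]string{"service.name"},
	))
}
```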

### Cache Benefits

- **Performance**: Avoids re-executing identical queries
- **Efficiency**: Only queries missing time ranges
- **Parallelism**: Multiple missing ranges queried in parallel

---

## Result Processing

### Result Merging

**Location**: `pkg/querier/querier.go:795`

**Process**:
1. Results from multiple queries collected
2. For time series: series merged by labels
3. For raw data: rows combined
4. Statistics aggregated (rows scanned, bytes scanned, duration)
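
For the time-series case, "merged by labels" can be pictured as below. `TimeSeries` mirrors the response model earlier in this document; the label-key encoding and function names are assumptions for illustration.

```go
package main

import (
	"fmt"
	"sort"
)

type TimeSeriesValue struct {
	Timestamp int64
	Value     float64
}

type TimeSeries struct {
	Labels map[string]string
	Values []TimeSeriesValue
}

// labelKey builds a deterministic key from a label set.
func labelKey(labels map[string]string) string {
	keys := make([]string, 0, len(labels))
	for k := range labels {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	out := ""
	for _, k := range keys {
		out += k + "=" + labels[k] + ";"
	}
	return out
}

// mergeSeries combines series that share a label set, concatenating
// their values in timestamp order (e.g., cached + fresh batches).
func mergeSeries(batches ...[]TimeSeries) []TimeSeries {
	byLabels := map[string]*TimeSeries{}
	for _, batch := range batches {
		for _, s := range batch {
			key := labelKey(s.Labels)
			if existing, ok := byLabels[key]; ok {
				existing.Values = append(existing.Values, s.Values...)
			} else {
				copied := s
				byLabels[key] = &copied
			}
		}
	}
	merged := make([]TimeSeries, 0, len(byLabels))
	for _, s := range byLabels {
		sort.Slice(s.Values, func(i, j int) bool { return s.Values[i].Timestamp < s.Values[j].Timestamp })
		merged = append(merged, *s)
	}
	return merged
}

func main() {
	a := []TimeSeries{{Labels: map[string]string{"service": "api"}, Values: []TimeSeriesValue{{60, 1}}}}
	b := []TimeSeries{{Labels: map[string]string{"service": "api"}, Values: []TimeSeriesValue{{120, 2}}}}
	fmt.Println(mergeSeries(a, b)) // one series with both points
}
```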

### Formula Evaluation

**Location**: `pkg/querier/formula_query.go`

**Process**:
1. Formula expression parsed
2. Referenced query results retrieved
3. Expression evaluated using the govaluate library
4. Result computed and formatted
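
A self-contained example of the evaluation step using the `govaluate` library named above. The variable values are made up; the real code feeds in per-point values from the referenced query results.

```go
package main

import (
	"fmt"

	"github.com/Knetic/govaluate"
)

func main() {
	// "A / B * 100" mirrors the formula example earlier in this doc.
	expr, err := govaluate.NewEvaluableExpression("A / B * 100")
	if err != nil {
		panic(err)
	}

	// In the real flow, A and B come from the aligned data points of
	// the referenced queries; these constants are illustrative.
	result, err := expr.Evaluate(map[string]interface{}{
		"A": 25.0,
		"B": 200.0,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // 12.5
}
```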

### Variable Substitution

**Location**: `pkg/querier/querier.go`

**Process**:
1. Variables extracted from the request
2. Variable values substituted into queries
3. Applied to filters, aggregations, and other query parts

---

## Performance Considerations

### Query Optimization

1. **Time Range Optimization**:
   - For trace queries with a `trace_id` filter, query `trace_summary` first to narrow the time range
   - Use appropriate time ranges to limit the data scanned

2. **Step Interval Calculation**:
   - Automatic step interval calculation based on time range
   - Minimum step interval enforcement
   - Warnings for suboptimal intervals

3. **Index Usage**:
   - Queries use time bucket columns (`ts_bucket_start`) for efficient filtering
   - Proper filter placement for index utilization

4. **Limit Enforcement**:
   - Raw data queries should include limits
   - Pagination support via offset/cursor

### Best Practices

1. **Use the Query Builder**: Prefer the query builder over raw SQL for better optimization
2. **Limit Time Ranges**: Always specify reasonable time ranges
3. **Use Aggregations**: For large datasets, use aggregations instead of raw data
4. **Cache Awareness**: Be mindful of cache TTLs when testing
5. **Parallel Queries**: Multiple independent queries execute in parallel
6. **Step Intervals**: Let the system calculate optimal step intervals

### Monitoring

Execution statistics are included in the response:
- `RowsScanned`: Total rows scanned
- `BytesScanned`: Total bytes scanned
- `DurationMS`: Query execution time
- `StepIntervals`: Step intervals per query

---

## Extending the API

### Adding a New Query Type

1. **Define Query Type** (`pkg/types/querybuildertypes/querybuildertypesv5/query.go`):
```go
const (
    QueryTypeMyNewType QueryType = "my_new_type"
)
```

2. **Define Query Spec**:
```go
type MyNewQuerySpec struct {
    Name string
    // ... your fields
}
```

3. **Update QueryEnvelope Unmarshaling** (`pkg/types/querybuildertypes/querybuildertypesv5/query.go`):
```go
case QueryTypeMyNewType:
    var spec MyNewQuerySpec
    if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "my new query spec"); err != nil {
        return wrapUnmarshalError(err, "invalid my new query spec: %v", err)
    }
    q.Spec = spec
```

4. **Implement Query Interface** (`pkg/querier/my_new_query.go`):
```go
type myNewQuery struct {
    spec MyNewQuerySpec
    // ... other fields
}

func (q *myNewQuery) Execute(ctx context.Context) (*qbtypes.Result, error) {
    // Implementation
}

func (q *myNewQuery) Fingerprint() string {
    // Generate fingerprint for caching
}

func (q *myNewQuery) Window() (uint64, uint64) {
    // Return time range
}
```

5. **Update Querier** (`pkg/querier/querier.go`):
```go
case QueryTypeMyNewType:
    myQuery, ok := query.Spec.(MyNewQuerySpec)
    if !ok {
        return nil, errors.NewInvalidInputf(...)
    }
    queries[myQuery.Name] = newMyNewQuery(myQuery, ...)
```

### Adding a New Request Type

1. **Define Request Type** (`pkg/types/querybuildertypes/querybuildertypesv5/req.go`):
```go
const (
    RequestTypeMyNewType RequestType = "my_new_type"
)
```

2. **Update Statement Builders**: Add handling in the `Build()` method
3. **Update Query Execution**: Add result processing for the new type
4. **Update Response Models**: Add a response data structure

### Adding a New Aggregation Function

1. **Update Aggregation Rewriter** (`pkg/querybuilder/agg_expr_rewriter.go`):
```go
func (r *aggExprRewriter) RewriteAggregation(expr string) (string, error) {
    if strings.HasPrefix(expr, "my_function(") {
        // Parse arguments
        // Return ClickHouse SQL expression
        return "myClickHouseFunction(...)", nil
    }
    // ... existing functions
}
```

2. **Update Documentation**: Document the new function

---

## Common Patterns

### Pattern 1: Simple Time Series Query

```go
req := qbtypes.QueryRangeRequest{
    Start:       startMs,
    End:         endMs,
    RequestType: qbtypes.RequestTypeTimeSeries,
    CompositeQuery: qbtypes.CompositeQuery{
        Queries: []qbtypes.QueryEnvelope{
            {
                Type: qbtypes.QueryTypeBuilder,
                Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                    Name:   "A",
                    Signal: telemetrytypes.SignalMetrics,
                    Aggregations: []qbtypes.MetricAggregation{
                        {Expression: "sum(rate)", Alias: "total"},
                    },
                    StepInterval: qbtypes.Step{Duration: time.Minute},
                },
            },
        },
    },
}
```

### Pattern 2: Query with Filter and Group By

```go
req := qbtypes.QueryRangeRequest{
    Start:       startMs,
    End:         endMs,
    RequestType: qbtypes.RequestTypeTimeSeries,
    CompositeQuery: qbtypes.CompositeQuery{
        Queries: []qbtypes.QueryEnvelope{
            {
                Type: qbtypes.QueryTypeBuilder,
                Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
                    Name:   "A",
                    Signal: telemetrytypes.SignalTraces,
                    Filter: &qbtypes.Filter{
                        Expression: "service.name = 'api' AND duration_nano > 1000000",
                    },
                    Aggregations: []qbtypes.TraceAggregation{
                        {Expression: "count()", Alias: "total"},
                    },
                    GroupBy: []qbtypes.GroupByKey{
                        {TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
                            Name:         "service.name",
                            FieldContext: telemetrytypes.FieldContextResource,
                        }},
                    },
                },
            },
        },
    },
}
```

### Pattern 3: Formula Query

```go
req := qbtypes.QueryRangeRequest{
    Start:       startMs,
    End:         endMs,
    RequestType: qbtypes.RequestTypeTimeSeries,
    CompositeQuery: qbtypes.CompositeQuery{
        Queries: []qbtypes.QueryEnvelope{
            {
                Type: qbtypes.QueryTypeBuilder,
                Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                    Name: "A",
                    // ... query A definition
                },
            },
            {
                Type: qbtypes.QueryTypeBuilder,
                Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
                    Name: "B",
                    // ... query B definition
                },
            },
            {
                Type: qbtypes.QueryTypeFormula,
                Spec: qbtypes.QueryBuilderFormula{
                    Name:       "C",
                    Expression: "A / B * 100",
                },
            },
        },
    },
}
```

---

## Testing

### Unit Tests

- `pkg/querier/querier_test.go` - Querier tests
- `pkg/querier/builder_query_test.go` - Builder query tests
- `pkg/querier/formula_query_test.go` - Formula query tests

### Integration Tests

- `tests/integration/` - End-to-end API tests

### Running Tests

```bash
# Run all querier tests
go test ./pkg/querier/...

# Run with verbose output
go test -v ./pkg/querier/...

# Run a specific test
go test -v ./pkg/querier/ -run TestQueryRange
```

---

## Debugging

### Enable Debug Logging

```go
// In querier.go
q.logger.DebugContext(ctx, "Executing query",
    "query", queryName,
    "start", start,
    "end", end)
```

### Common Issues

1. **Query Not Found**: Check that the query name matches in CompositeQuery
2. **SQL Errors**: Check the generated SQL in logs, verify ClickHouse syntax
3. **Performance**: Check execution statistics, optimize time ranges
4. **Cache Issues**: Set `NoCache: true` to bypass the cache
5. **Formula Errors**: Check the formula expression syntax and referenced query names

---

## References

### Key Files

- `pkg/querier/querier.go` - Main query orchestration
- `pkg/querier/builder_query.go` - Builder query execution
- `pkg/types/querybuildertypes/querybuildertypesv5/` - Request/response models
- `pkg/telemetrystore/` - ClickHouse interface
- `pkg/telemetrymetadata/` - Metadata store

### Signal-Specific Documentation

- [Traces Module](./TRACES_MODULE.md) - Trace-specific details
- Logs module documentation (when available)
- Metrics module documentation (when available)

### Related Documentation

- [ClickHouse Documentation](https://clickhouse.com/docs)
- [PromQL Documentation](https://prometheus.io/docs/prometheus/latest/querying/basics/)

---

## Contributing

When contributing to the Query Range API:

1. **Follow Existing Patterns**: Match the style of existing query types
2. **Add Tests**: Include unit tests for new functionality
3. **Update Documentation**: Update this doc for significant changes
4. **Consider Performance**: Optimize queries and use caching appropriately
5. **Handle Errors**: Provide meaningful error messages

For questions or help, reach out to the maintainers or open an issue.

---

**File: `docs/implementation/SPAN_METRICS_PROCESSOR.md`** (new file, +185 lines)

# SigNoz Span Metrics Processor

The `signozspanmetricsprocessor` is an OpenTelemetry Collector processor that intercepts trace data to generate RED metrics (Rate, Errors, Duration) from spans.

**Location:** `signoz-otel-collector/processor/signozspanmetricsprocessor/`

## Trace Interception

The processor implements the `consumer.Traces` interface and sits in the traces pipeline:

```go
func (p *processorImp) ConsumeTraces(ctx context.Context, traces ptrace.Traces) error {
    p.lock.Lock()
    p.aggregateMetrics(traces)
    p.lock.Unlock()

    return p.tracesConsumer.ConsumeTraces(ctx, traces) // forward unchanged
}
```

All traces flow through this method. Metrics are aggregated, then the traces are forwarded unmodified to the next consumer.

## Metrics Generated

| Metric | Type | Description |
|--------|------|-------------|
| `signoz_latency` | Histogram | Span latency by service/operation/kind/status |
| `signoz_calls_total` | Counter | Call count per service/operation/kind/status |
| `signoz_db_latency_sum/count` | Counter | DB call latency (spans with a `db.system` attribute) |
| `signoz_external_call_latency_sum/count` | Counter | External call latency (client spans with a remote address) |

### Dimensions

All metrics include these base dimensions:
- `service.name` - from resource attributes
- `operation` - span name
- `span.kind` - SPAN_KIND_SERVER, SPAN_KIND_CLIENT, etc.
- `status.code` - STATUS_CODE_OK, STATUS_CODE_ERROR, etc.

Additional dimensions can be configured.
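
Conceptually, each unique combination of these dimension values becomes one aggregation key, and one histogram/counter bucket exists per key. A simplified sketch of that idea (the delimiter and function name are assumptions, not the processor's actual key encoding):

```go
package main

import (
	"fmt"
	"strings"
)

// metricKey concatenates the base dimensions into an aggregation key.
// Spans sharing all four values update the same histogram/counter.
func metricKey(serviceName, operation, spanKind, statusCode string) string {
	return strings.Join([]string{serviceName, operation, spanKind, statusCode}, "\x00")
}

func main() {
	k := metricKey("checkout", "HTTP POST /pay", "SPAN_KIND_CLIENT", "STATUS_CODE_OK")
	fmt.Printf("%q\n", k)
}
```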

## Aggregation Flow

```
traces pipeline
      │
      ▼
┌──────────────────────────────────────────────────┐
│ ConsumeTraces()                                  │
│       │                                          │
│       ▼                                          │
│ aggregateMetrics(traces)                         │
│       │                                          │
│       ├── for each ResourceSpan                  │
│       │     extract service.name                 │
│       │       │                                  │
│       │       ├── for each Span                  │
│       │       │     │                            │
│       │       │     ▼                            │
│       │       │   aggregateMetricsForSpan()      │
│       │       │     ├── skip stale spans (>24h)  │
│       │       │     ├── skip excluded patterns   │
│       │       │     ├── calculate latency        │
│       │       │     ├── build metric key         │
│       │       │     ├── update histograms        │
│       │       │     └── cache dimensions         │
│       │                                          │
│       ▼                                          │
│ forward traces to next consumer                  │
└──────────────────────────────────────────────────┘
```

### Periodic Export

A background goroutine exports the aggregated metrics on a ticker interval:

```go
go func() {
    for {
        select {
        case <-p.ticker.C:
            p.exportMetrics(ctx) // build and send to metrics exporter
        }
    }
}()
```

## Key Design Features

### 1. Time Bucketing (Delta Temporality)

For delta temporality, metric keys include a time bucket prefix:

```go
if p.config.GetAggregationTemporality() == pmetric.AggregationTemporalityDelta {
    p.AddTimeToKeyBuf(span.StartTimestamp().AsTime()) // truncated to interval
}
```

- Spans are grouped by time bucket (default: 1 minute)
- After export, buckets are reset
- Memory-efficient for high-cardinality data
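
The bucket prefix boils down to truncating the span's start time to the configured interval, so all spans in the same minute share a key. A minimal illustration using only the standard library:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	interval := time.Minute // corresponds to time_bucket_interval
	start := time.Date(2024, 1, 15, 10, 32, 47, 0, time.UTC)

	// Spans starting anywhere in 10:32:00-10:32:59 land in the same bucket.
	bucket := start.Truncate(interval)
	fmt.Println(bucket) // 2024-01-15 10:32:00 +0000 UTC
}
```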

### 2. LRU Dimension Caching

Dimension key-value maps are cached to avoid rebuilding:

```go
if _, has := p.metricKeyToDimensions.Get(k); !has {
    p.metricKeyToDimensions.Add(k, p.buildDimensionKVs(...))
}
```

- Configurable cache size (`DimensionsCacheSize`)
- Evicted keys are also removed from the histograms

### 3. Cardinality Protection

Prevents memory explosion from high cardinality:

```go
if len(p.serviceToOperations) > p.maxNumberOfServicesToTrack {
    serviceName = "overflow_service"
}
if len(p.serviceToOperations[serviceName]) > p.maxNumberOfOperationsToTrackPerService {
    spanName = "overflow_operation"
}
```

Excess services/operations are aggregated into overflow buckets.

### 4. Exemplars

Trace/span IDs are attached to histogram samples for metric-to-trace correlation:

```go
histo.exemplarsData = append(histo.exemplarsData, exemplarData{
    traceID: traceID,
    spanID:  spanID,
    value:   latency,
})
```

This enables "show me a trace that caused this latency spike" in the UI.

## Configuration Options

| Option | Description | Default |
|--------|-------------|---------|
| `metrics_exporter` | Target exporter for generated metrics | required |
| `latency_histogram_buckets` | Custom histogram bucket boundaries | 2, 4, 6, 8, 10, 50, 100, 200, 400, 800, 1000, 1400, 2000, 5000, 10000, 15000 ms |
| `dimensions` | Additional span/resource attributes to include | [] |
| `dimensions_cache_size` | LRU cache size for dimension maps | 1000 |
| `aggregation_temporality` | cumulative or delta | cumulative |
| `time_bucket_interval` | Bucket interval for delta temporality | 1m |
| `skip_spans_older_than` | Skip stale spans | 24h |
| `max_services_to_track` | Cardinality limit for services | - |
| `max_operations_to_track_per_service` | Cardinality limit for operations | - |
| `exclude_patterns` | Regex patterns to skip spans | [] |

## Pipeline Configuration Example

```yaml
processors:
  signozspanmetrics:
    metrics_exporter: clickhousemetricswrite
    latency_histogram_buckets: [2ms, 4ms, 6ms, 8ms, 10ms, 50ms, 100ms, 200ms]
    dimensions:
      - name: http.method
      - name: http.status_code
    dimensions_cache_size: 10000
    aggregation_temporality: delta

pipelines:
  traces:
    receivers: [otlp]
    processors: [signozspanmetrics, batch]
    exporters: [clickhousetraces]

  metrics:
    receivers: [otlp]
    exporters: [clickhousemetricswrite]
```

The processor sits in the traces pipeline but exports to a metrics pipeline exporter.

---

**File: `docs/implementation/TRACES_MODULE.md`** (new file, +832 lines)

# SigNoz Traces Module - Developer Guide

This document provides a comprehensive guide to understanding and contributing to the traces module in SigNoz. It covers architecture, APIs, code flows, and implementation details.

## Table of Contents

1. [Overview](#overview)
2. [Architecture](#architecture)
3. [Data Models](#data-models)
4. [API Endpoints](#api-endpoints)
5. [Code Flows](#code-flows)
6. [Key Components](#key-components)
7. [Query Building System](#query-building-system)
8. [Storage Schema](#storage-schema)
9. [Extending the Traces Module](#extending-the-traces-module)

---

## Overview

The traces module in SigNoz handles distributed tracing data from OpenTelemetry. It provides:

- **Ingestion**: Receives traces via the OpenTelemetry Collector
- **Storage**: Stores traces in ClickHouse
- **Querying**: Supports complex queries with filters, aggregations, and trace operators
- **Visualization**: Provides waterfall and flamegraph views
- **Trace Funnels**: Advanced analytics for multi-step trace analysis

### Key Technologies

- **Backend**: Go (Golang)
- **Storage**: ClickHouse (columnar database)
- **Protocol**: OpenTelemetry Protocol (OTLP)
- **Query Language**: Custom query builder + ClickHouse SQL

---

## Architecture

### High-Level Flow

```
Application → OpenTelemetry SDK → OTLP Receiver →
[Processors: signozspanmetrics, batch] →
ClickHouse Traces Exporter → ClickHouse Database
                 ↓
         Query Service (Go)
                 ↓
     Frontend (React/TypeScript)
```

### Component Architecture

```
┌─────────────────────────────────────────────────────────┐
│                    Frontend (React)                     │
│  - TracesExplorer                                       │
│  - TraceDetail (Waterfall/Flamegraph)                   │
│  - Query Builder UI                                     │
└────────────────────┬────────────────────────────────────┘
                     │ HTTP/REST API
┌────────────────────▼────────────────────────────────────┐
│                   Query Service (Go)                    │
│  ┌──────────────────────────────────────────────────┐   │
│  │ HTTP Handlers (http_handler.go)                  │   │
│  │  - QueryRangeV5 (Main query endpoint)            │   │
│  │  - GetWaterfallSpansForTrace                     │   │
│  │  - GetFlamegraphSpansForTrace                    │   │
│  │  - Trace Fields API                              │   │
│  └──────────────────────────────────────────────────┘   │
│  ┌──────────────────────────────────────────────────┐   │
│  │ Querier (querier.go)                             │   │
│  │  - Query orchestration                           │   │
│  │  - Cache management                              │   │
│  │  - Result merging                                │   │
│  └──────────────────────────────────────────────────┘   │
│  ┌──────────────────────────────────────────────────┐   │
│  │ Statement Builders                               │   │
│  │  - traceQueryStatementBuilder                    │   │
│  │  - traceOperatorStatementBuilder                 │   │
│  │  - Builds ClickHouse SQL from query specs        │   │
│  └──────────────────────────────────────────────────┘   │
│  ┌──────────────────────────────────────────────────┐   │
│  │ ClickHouse Reader (clickhouseReader/)            │   │
│  │  - Direct trace retrieval                        │   │
│  │  - Waterfall/Flamegraph data processing          │   │
│  └──────────────────────────────────────────────────┘   │
└────────────────────┬────────────────────────────────────┘
                     │ ClickHouse Protocol
┌────────────────────▼────────────────────────────────────┐
│                  ClickHouse Database                    │
│  - signoz_traces.distributed_signoz_index_v3            │
│  - signoz_traces.distributed_trace_summary              │
│  - signoz_traces.distributed_tag_attributes_v2          │
└─────────────────────────────────────────────────────────┘
```

---

## Data Models

### Core Trace Models

**Location**: `pkg/query-service/model/trace.go`

### Query Request Models

**Location**: `pkg/types/querybuildertypes/querybuildertypesv5/`

- `QueryRangeRequest`: Main query request structure
- `QueryBuilderQuery[TraceAggregation]`: Query builder specification for traces
- `QueryBuilderTraceOperator`: Trace operator query specification
- `CompositeQuery`: Container for multiple queries

---

## API Endpoints

### 1. Query Range API (V5) - Primary Query Endpoint

**Endpoint**: `POST /api/v5/query_range`

**Handler**: `QuerierAPI.QueryRange` → `querier.QueryRange`

**Purpose**: Main query endpoint for traces, logs, and metrics. Supports:
- Query builder queries
- Trace operator queries
- Aggregations, filters, group by
- Time series, scalar, and raw data requests

> **Note**: For detailed information about the Query Range API, including request/response models, query types, and common code flows, see the [Query Range API Documentation](./QUERY_RANGE_API.md).

**Trace-Specific Details**:
- Uses `traceQueryStatementBuilder` for SQL generation
- Supports trace-specific aggregations (count, avg, p99, etc. on `duration_nano`)
- Trace operator queries combine multiple trace queries with set operations
- Time range optimization when a `trace_id` filter is present

**Key Files**:
- `pkg/telemetrytraces/statement_builder.go` - Trace SQL generation
- `pkg/telemetrytraces/trace_operator_statement_builder.go` - Trace operator SQL
- `pkg/querier/trace_operator_query.go` - Trace operator execution
### 2. Waterfall View API

**Endpoint**: `POST /api/v2/traces/waterfall/{traceId}`

**Handler**: `GetWaterfallSpansForTraceWithMetadata`

**Purpose**: Retrieves spans for waterfall visualization with metadata

**Request Parameters**:
```go
type GetWaterfallSpansForTraceWithMetadataParams struct {
    SelectedSpanID              string   // Selected span to focus on
    IsSelectedSpanIDUnCollapsed bool     // Whether selected span is expanded
    UncollapsedSpans            []string // List of expanded span IDs
}
```

**Response**:
```go
type GetWaterfallSpansForTraceWithMetadataResponse struct {
    StartTimestampMillis          uint64            // Trace start time
    EndTimestampMillis            uint64            // Trace end time
    DurationNano                  uint64            // Total duration
    RootServiceName               string            // Root service
    RootServiceEntryPoint         string            // Entry point operation
    TotalSpansCount               uint64            // Total spans
    TotalErrorSpansCount          uint64            // Error spans
    ServiceNameToTotalDurationMap map[string]uint64 // Service durations
    Spans                         []*Span           // Span tree
    HasMissingSpans               bool              // Missing spans indicator
    UncollapsedSpans              []string          // Expanded spans
}
```

**Code Flow**:
```
Handler → ClickHouseReader.GetWaterfallSpansForTraceWithMetadata
  → Query trace_summary for time range
  → Query spans from signoz_index_v3
  → Build span tree structure
  → Apply uncollapsed/selected span logic
  → Return filtered spans (500 span limit)
```
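
The "build span tree structure" step is essentially a parent-ID index over the fetched spans. A reduced sketch of the idea (the `span` shape here is illustrative, not the module's actual `Span` type):

```go
package main

import "fmt"

type span struct {
	SpanID       string
	ParentSpanID string // empty for the root span
	Name         string
}

// buildTree indexes spans by parent ID; roots are spans whose parent
// is empty or absent from the trace (the "missing spans" case).
func buildTree(spans []span) (roots []span, children map[string][]span) {
	byID := map[string]bool{}
	for _, s := range spans {
		byID[s.SpanID] = true
	}
	children = map[string][]span{}
	for _, s := range spans {
		if s.ParentSpanID == "" || !byID[s.ParentSpanID] {
			roots = append(roots, s)
			continue
		}
		children[s.ParentSpanID] = append(children[s.ParentSpanID], s)
	}
	return roots, children
}

func main() {
	spans := []span{
		{SpanID: "a", Name: "GET /checkout"},
		{SpanID: "b", ParentSpanID: "a", Name: "charge-card"},
		{SpanID: "c", ParentSpanID: "a", Name: "reserve-stock"},
	}
	roots, children := buildTree(spans)
	fmt.Println(len(roots), len(children["a"])) // 1 2
}
```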

**Key Files**:
- `pkg/query-service/app/http_handler.go:1748` - Handler
- `pkg/query-service/app/clickhouseReader/reader.go:873` - Implementation
- `pkg/query-service/app/traces/tracedetail/waterfall.go` - Tree processing

### 3. Flamegraph View API

**Endpoint**: `POST /api/v2/traces/flamegraph/{traceId}`

**Handler**: `GetFlamegraphSpansForTrace`

**Purpose**: Retrieves spans organized by level for flamegraph visualization

**Request Parameters**:
```go
type GetFlamegraphSpansForTraceParams struct {
    SelectedSpanID string // Selected span ID
}
```

**Response**:
```go
type GetFlamegraphSpansForTraceResponse struct {
    StartTimestampMillis uint64              // Trace start
    EndTimestampMillis   uint64              // Trace end
    DurationNano         uint64              // Total duration
    Spans                [][]*FlamegraphSpan // Spans organized by level
}
```

**Code Flow**:
```
Handler → ClickHouseReader.GetFlamegraphSpansForTrace
  → Query trace_summary for time range
  → Query spans from signoz_index_v3
  → Build span tree
  → BFS traversal to organize by level
  → Sample spans (50 levels, 100 spans/level max)
  → Return level-organized spans
```
|
||||
|
||||
**Key Files**:
|
||||
- `pkg/query-service/app/http_handler.go:1781` - Handler
|
||||
- `pkg/query-service/app/clickhouseReader/reader.go:1091` - Implementation
|
||||
- `pkg/query-service/app/traces/tracedetail/flamegraph.go` - BFS processing
|
||||
|
||||
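The level-by-level organization is a standard breadth-first traversal of the span tree. A minimal sketch of the idea (not the actual code in `flamegraph.go`; the `span` type and the naive per-level truncation are simplified):

```go
package sketch

type span struct {
	SpanID   string
	Children []*span
}

// levelOrder groups spans by depth using BFS, capping both the number of
// levels and the spans kept per level, mirroring the 50-level /
// 100-spans-per-level sampling described above.
func levelOrder(roots []*span, maxLevels, maxPerLevel int) [][]*span {
	var levels [][]*span
	current := roots
	for len(current) > 0 && len(levels) < maxLevels {
		if len(current) > maxPerLevel {
			current = current[:maxPerLevel] // naive truncation for the sketch
		}
		levels = append(levels, current)
		var next []*span
		for _, s := range current {
			next = append(next, s.Children...)
		}
		current = next
	}
	return levels
}
```
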
### 4. Trace Fields API

**Endpoints**:

- `GET /api/v2/traces/fields` - Get available trace fields
- `POST /api/v2/traces/fields` - Update trace field metadata

**Handlers**: `traceFields`, `updateTraceField`

**Purpose**: Manage trace field metadata for the query builder

**Key Files**:

- `pkg/query-service/app/http_handler.go:4912` - Get handler
- `pkg/query-service/app/http_handler.go:4921` - Update handler

### 5. Trace Funnels API

**Base Path**: `/api/v1/trace-funnels/*`

**Purpose**: Manage trace funnels (multi-step trace analysis)

**Endpoints**:

- `POST /api/v1/trace-funnels/new` - Create funnel
- `GET /api/v1/trace-funnels/list` - List funnels
- `GET /api/v1/trace-funnels/{funnel_id}` - Get funnel
- `PUT /api/v1/trace-funnels/{funnel_id}` - Update funnel
- `DELETE /api/v1/trace-funnels/{funnel_id}` - Delete funnel
- `POST /api/v1/trace-funnels/{funnel_id}/analytics/*` - Analytics endpoints

**Key Files**:

- `pkg/query-service/app/http_handler.go:5084` - Route registration
- `pkg/modules/tracefunnel/` - Funnel implementation

---

## Code Flows

### Flow 1: Query Range Request (V5)

This is the primary query flow for traces. For the complete flow covering all query types, see the [Query Range API Documentation](./QUERY_RANGE_API.md#code-flow).

**Trace-Specific Flow**:

```
1. HTTP Request
   POST /api/v5/query_range
   ↓
2. Querier.QueryRange (common flow - see QUERY_RANGE_API.md)
   ↓
3. Trace Query Processing:
   a. Builder Query (QueryTypeBuilder with SignalTraces):
      - newBuilderQuery() creates builderQuery instance
      - Uses traceStmtBuilder (traceQueryStatementBuilder)
   b. Trace Operator Query (QueryTypeTraceOperator):
      - newTraceOperatorQuery() creates traceOperatorQuery
      - Uses traceOperatorStmtBuilder
   ↓
4. Trace Statement Building
   traceQueryStatementBuilder.Build() (pkg/telemetrytraces/statement_builder.go:58)
   - Resolves trace field keys from the metadata store
   - Optimizes the time range if a trace_id filter is present (queries trace_summary)
   - Maps fields using traceFieldMapper
   - Builds conditions using traceConditionBuilder
   - Builds SQL based on the request type:
     * RequestTypeRaw → buildListQuery()
     * RequestTypeTimeSeries → buildTimeSeriesQuery()
     * RequestTypeScalar → buildScalarQuery()
     * RequestTypeTrace → buildTraceQuery()
   ↓
5. Query Execution
   builderQuery.Execute() (pkg/querier/builder_query.go)
   - Executes SQL against ClickHouse (signoz_traces database)
   - Processes results into the response format
   ↓
6. Result Processing (common flow - see QUERY_RANGE_API.md)
   - Merges results from multiple queries
   - Applies formulas if present
   - Handles caching
   ↓
7. HTTP Response
   - Returns QueryRangeResponse with trace results
```

**Trace-Specific Key Components**:

- `pkg/telemetrytraces/statement_builder.go` - Trace SQL generation
- `pkg/telemetrytraces/field_mapper.go` - Trace field mapping
- `pkg/telemetrytraces/condition_builder.go` - Trace filter building
- `pkg/telemetrytraces/trace_operator_statement_builder.go` - Trace operator SQL

### Flow 2: Waterfall View Request

```
1. HTTP Request
   POST /api/v2/traces/waterfall/{traceId}
   ↓
2. GetWaterfallSpansForTraceWithMetadata handler
   - Extracts traceId from the URL
   - Parses the request body for params
   ↓
3. ClickHouseReader.GetWaterfallSpansForTraceWithMetadata
   - Checks cache first (5 minute TTL)
   ↓
4. If cache miss:
   a. Query trace_summary table
      SELECT * FROM distributed_trace_summary WHERE trace_id = ?
      - Gets time range (start, end, num_spans)
      ↓
   b. Query spans table
      SELECT ... FROM distributed_signoz_index_v3
      WHERE trace_id = ?
        AND ts_bucket_start >= ? AND ts_bucket_start <= ?
      - Retrieves all spans for the trace
      ↓
   c. Build span tree
      - Parse references to build parent-child relationships
      - Identify root spans (no parent)
      - Calculate service durations
      ↓
   d. Cache result
   ↓
5. Apply selection logic
   tracedetail.GetSelectedSpans()
   - Traverses the tree based on uncollapsed spans
   - Finds the path to the selected span
   - Returns a sliding window (500 spans max)
   ↓
6. HTTP Response
   - Returns spans with metadata
```

**Key Components**:

- `pkg/query-service/app/clickhouseReader/reader.go:873`
- `pkg/query-service/app/traces/tracedetail/waterfall.go`
- `pkg/query-service/model/trace.go`

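Step 4c is essentially a single pass that indexes spans by ID and then attaches each span to its parent. A minimal sketch of that idea with a simplified span type (the real model lives in `pkg/query-service/model/trace.go`):

```go
package sketch

type traceSpan struct {
	SpanID       string
	ParentSpanID string
	Children     []*traceSpan
}

// buildTree links spans into parent-child relationships and returns the
// roots. Spans whose parent is absent from the trace are treated as roots,
// which is also how missing spans can be detected.
func buildTree(spans []*traceSpan) []*traceSpan {
	byID := make(map[string]*traceSpan, len(spans))
	for _, s := range spans {
		byID[s.SpanID] = s
	}
	var roots []*traceSpan
	for _, s := range spans {
		if parent, ok := byID[s.ParentSpanID]; ok && s.ParentSpanID != "" {
			parent.Children = append(parent.Children, s)
		} else {
			roots = append(roots, s)
		}
	}
	return roots
}
```
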
### Flow 3: Trace Operator Query

Trace operators allow combining multiple trace queries with set operations.

```
1. QueryRangeRequest with QueryTypeTraceOperator
   ↓
2. Querier identifies trace operator queries
   - Parses the expression to find dependencies
   - Collects referenced queries
   ↓
3. traceOperatorStatementBuilder.Build()
   - Parses the expression (e.g., "A AND B", "A OR B")
   - Builds an expression tree
   ↓
4. traceOperatorCTEBuilder.build()
   - Creates CTEs (Common Table Expressions) for each query
   - Builds the final query with set operations:
     * AND → INTERSECT
     * OR → UNION
     * NOT → EXCEPT
   ↓
5. Execute combined query
   - Returns traces matching the operator expression
```

**Key Components**:

- `pkg/telemetrytraces/trace_operator_statement_builder.go`
- `pkg/telemetrytraces/trace_operator_cte_builder.go`
- `pkg/querier/trace_operator_query.go`

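To make step 4 concrete, here is a hedged sketch of the shape of the combined SQL; the real builder emits the CTEs with each query's own filters, and the placeholder comments stand in for those:

```go
// Illustrative shape of the SQL generated for the expression "A AND B";
// each CTE carries that query's own filters, and the set operation comes
// from the operator (AND → INTERSECT, OR → UNION, NOT → EXCEPT).
const combinedQueryShape = `
WITH
    A AS (SELECT trace_id FROM signoz_traces.distributed_signoz_index_v3 WHERE /* filters for A */),
    B AS (SELECT trace_id FROM signoz_traces.distributed_signoz_index_v3 WHERE /* filters for B */)
SELECT trace_id FROM A
INTERSECT
SELECT trace_id FROM B`
```
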
---

## Key Components

> **Note**: For common components used across all signals (Querier, TelemetryStore, MetadataStore, etc.), see the [Query Range API Documentation](./QUERY_RANGE_API.md#key-components).

### 1. Trace Statement Builder

**Location**: `pkg/telemetrytraces/statement_builder.go`

**Purpose**: Converts trace query builder specifications into ClickHouse SQL

**Key Methods**:

- `Build()`: Main entry point, builds the SQL statement
- `buildListQuery()`: Builds query for raw/list results
- `buildTimeSeriesQuery()`: Builds query for time series
- `buildScalarQuery()`: Builds query for scalar values
- `buildTraceQuery()`: Builds query for trace-specific results

**Key Features**:

- Trace field resolution via the metadata store
- Time range optimization for trace_id filters (queries trace_summary first)
- Support for trace aggregations, filters, group by, ordering
- Calculated field support (http_method, db_name, has_error, etc.)
- Resource filter support via resourceFilterStmtBuilder

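The relationship between `Build()` and the per-request-type helpers is a plain dispatch on the request type. A minimal self-contained sketch (types, method bodies, and the request-type strings are simplified stand-ins; the real method at statement_builder.go:58 also resolves fields and optimizes the time range first):

```go
package main

import "fmt"

// sketchStmtBuilder stands in for traceQueryStatementBuilder; the helpers
// below are stubs that would emit real SQL in the actual code.
type sketchStmtBuilder struct{}

func (b *sketchStmtBuilder) buildListQuery() (string, error)       { return "SELECT ... /* raw rows */", nil }
func (b *sketchStmtBuilder) buildTimeSeriesQuery() (string, error) { return "SELECT ... /* bucketed */", nil }
func (b *sketchStmtBuilder) buildScalarQuery() (string, error)     { return "SELECT ... /* single value */", nil }
func (b *sketchStmtBuilder) buildTraceQuery() (string, error)      { return "SELECT ... /* per trace */", nil }

// build dispatches on the request type, mirroring the mapping listed above.
func (b *sketchStmtBuilder) build(reqType string) (string, error) {
	switch reqType {
	case "raw":
		return b.buildListQuery()
	case "time_series":
		return b.buildTimeSeriesQuery()
	case "scalar":
		return b.buildScalarQuery()
	case "trace":
		return b.buildTraceQuery()
	default:
		return "", fmt.Errorf("unknown request type %q", reqType)
	}
}

func main() {
	sql, _ := (&sketchStmtBuilder{}).build("scalar")
	fmt.Println(sql)
}
```
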
### 2. Trace Field Mapper

**Location**: `pkg/telemetrytraces/field_mapper.go`

**Purpose**: Maps trace query field names to ClickHouse column names

**Field Types**:

- **Intrinsic Fields**: Built-in fields (trace_id, span_id, duration_nano, name, kind_string, status_code_string, etc.)
- **Calculated Fields**: Derived fields (http_method, db_name, has_error, response_status_code, etc.)
- **Attribute Fields**: Dynamic span/resource attributes (accessed via attributes_string, attributes_number, attributes_bool, resources_string)

**Example Mapping**:

```
"service.name"  → "resource_string_service$$name"
"http.method"   → Calculated from attributes_string['http.method']
"duration_nano" → "duration_nano" (intrinsic)
"trace_id"      → "trace_id" (intrinsic)
```

**Key Methods**:

- `MapField()`: Maps a field to a ClickHouse expression
- `MapAttribute()`: Maps attribute fields
- `MapResource()`: Maps resource fields

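Reduced to its essentials, the mapping decision is: intrinsic fields pass through, resource attributes become materialized `resource_string_*` columns, and everything else becomes a map lookup. A hedged sketch of that decision (the real mapper also consults the metadata store and field data types; the intrinsic table here is abbreviated):

```go
package sketch

import "strings"

// mapToColumn sketches the field → column decision made by MapField.
func mapToColumn(name, context string) string {
	intrinsic := map[string]bool{
		"trace_id": true, "span_id": true, "duration_nano": true, "name": true,
	}
	switch {
	case intrinsic[name]:
		// intrinsic fields are real columns, used as-is
		return name
	case context == "resource":
		// resource attributes are materialized columns with '.' flattened to '$$',
		// e.g. service.name → resource_string_service$$name
		return "resource_string_" + strings.ReplaceAll(name, ".", "$$")
	default:
		// span attributes live in typed maps; string attributes shown here
		return "attributes_string['" + name + "']"
	}
}
```
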
### 3. Trace Condition Builder

**Location**: `pkg/telemetrytraces/condition_builder.go`

**Purpose**: Builds WHERE clause conditions from trace filter expressions

**Supported Operators**:

- `=`, `!=`, `IN`, `NOT IN`
- `>`, `>=`, `<`, `<=`
- `LIKE`, `NOT LIKE`, `ILIKE`
- `EXISTS`, `NOT EXISTS`
- `CONTAINS`, `NOT CONTAINS`

**Key Methods**:

- `BuildCondition()`: Builds a condition from a filter expression
- Handles attribute, resource, and intrinsic field filtering

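As a rough illustration of what condition building amounts to, here is a hedged sketch that renders a single comparison against an already-mapped column (the real builder parses full filter expressions and handles typed attribute columns; values are inlined here only for readability, where the real code should use safe parameterization):

```go
package sketch

import "fmt"

// renderCondition renders one comparison into a WHERE fragment.
func renderCondition(column, op, value string) (string, error) {
	switch op {
	case "=", "!=", ">", ">=", "<", "<=", "LIKE", "NOT LIKE", "ILIKE":
		return fmt.Sprintf("%s %s '%s'", column, op, value), nil
	case "EXISTS":
		// for map-backed attributes, existence is a key-membership check
		return fmt.Sprintf("mapContains(attributes_string, '%s')", value), nil
	case "CONTAINS":
		return fmt.Sprintf("%s LIKE '%%%s%%'", column, value), nil
	default:
		return "", fmt.Errorf("unsupported operator %q", op)
	}
}
```
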
### 4. Trace Operator Statement Builder

**Location**: `pkg/telemetrytraces/trace_operator_statement_builder.go`

**Purpose**: Builds SQL for trace operator queries (AND, OR, NOT operations on trace queries)

**Key Methods**:

- `Build()`: Builds CTE-based SQL for trace operators
- Uses `traceOperatorCTEBuilder` to create Common Table Expressions

**Features**:

- Parses operator expressions (e.g., "A AND B")
- Creates CTEs for each referenced query
- Combines results using INTERSECT, UNION, EXCEPT

### 5. ClickHouse Reader (Trace-Specific Methods)

**Location**: `pkg/query-service/app/clickhouseReader/reader.go`

**Purpose**: Direct trace data retrieval and processing (bypasses the query builder)

**Key Methods**:

- `GetWaterfallSpansForTraceWithMetadata()`: Waterfall view data
- `GetFlamegraphSpansForTrace()`: Flamegraph view data
- `SearchTraces()`: Legacy trace search (still used for some flows)
- `GetMinAndMaxTimestampForTraceID()`: Time range optimization helper

**Caching**: Implements a 5-minute cache for trace detail views

**Note**: These methods are used for trace-specific visualizations. For general trace queries, use the Query Range API.

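The trace detail methods follow a plain cache-aside pattern around the expensive span fetch. A minimal sketch of the shape, using the 5-minute TTL from above (the cache interface and payload type here are illustrative, not the actual ones in reader.go):

```go
package sketch

import (
	"sync"
	"time"
)

type cachedTrace struct {
	spans     []string // stand-in for the real span tree payload
	expiresAt time.Time
}

type traceCache struct {
	mu      sync.Mutex
	entries map[string]cachedTrace
}

// getOrLoad returns cached spans when still fresh; otherwise it loads
// from ClickHouse via load() and caches the result for 5 minutes.
func (c *traceCache) getOrLoad(traceID string, load func() []string) []string {
	c.mu.Lock()
	defer c.mu.Unlock()
	if e, ok := c.entries[traceID]; ok && time.Now().Before(e.expiresAt) {
		return e.spans
	}
	spans := load()
	c.entries[traceID] = cachedTrace{spans: spans, expiresAt: time.Now().Add(5 * time.Minute)}
	return spans
}
```
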
---

## Query Building System

> **Note**: For general query building concepts and patterns, see the [Query Range API Documentation](./QUERY_RANGE_API.md). This section covers trace-specific aspects.

### Trace Query Builder Structure

A trace query consists of:

```go
QueryBuilderQuery[TraceAggregation] {
	Name:   "query_name",
	Signal: SignalTraces,
	Filter: &Filter{
		Expression: "service.name = 'api' AND duration_nano > 1000000",
	},
	Aggregations: []TraceAggregation{
		{Expression: "count()", Alias: "total"},
		{Expression: "avg(duration_nano)", Alias: "avg_duration"},
		{Expression: "p99(duration_nano)", Alias: "p99"},
	},
	GroupBy: []GroupByKey{
		{TelemetryFieldKey: {Name: "service.name", ...}},
	},
	Order: []OrderBy{...},
	Limit: 100,
}
```

### Trace-Specific SQL Generation Process

1. **Field Resolution**:
   - Resolve trace field names using `traceFieldMapper`
   - Handle intrinsic, calculated, and attribute fields
   - Map to ClickHouse columns (e.g., `service.name` → `resource_string_service$$name`)

2. **Time Range Optimization** (see the sketch after this list):
   - If a `trace_id` filter is present, query `trace_summary` first
   - Narrow the time range based on the trace's start/end times
   - Significantly reduces the data scanned

3. **Filter Building**:
   - Convert the filter expression using `traceConditionBuilder`
   - Handle attribute filters (attributes_string, attributes_number, attributes_bool)
   - Handle resource filters (resources_string)
   - Handle intrinsic field filters

4. **Aggregation Building**:
   - Build the SELECT clause with trace aggregations
   - Support trace-specific functions (count, avg, p99, etc. on duration_nano)

5. **Group By Building**:
   - Add a GROUP BY clause with trace fields
   - Support grouping by service.name, operation name, etc.

6. **Order Building**:
   - Add an ORDER BY clause
   - Support ordering by duration, timestamp, etc.

7. **Limit/Offset**:
   - Add pagination

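A hedged sketch of step 2, assuming a clickhouse-go v2 style connection; the real helper is `GetMinAndMaxTimestampForTraceID()` in reader.go, and the table and column names follow the schema described later in this document:

```go
package sketch

import (
	"context"
	"time"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// traceWindow narrows the scan window for a trace_id filter by reading the
// trace's actual start and end timestamps from the summary table.
func traceWindow(ctx context.Context, conn driver.Conn, traceID string) (start, end time.Time, err error) {
	row := conn.QueryRow(ctx,
		"SELECT start, end FROM signoz_traces.distributed_trace_summary WHERE trace_id = ?",
		traceID)
	err = row.Scan(&start, &end)
	return start, end, err
}
```
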
### Example Generated SQL

For the query `count() WHERE service.name = 'api' GROUP BY service.name`:

```sql
SELECT
    count() AS total,
    resource_string_service$$name AS service_name
FROM signoz_traces.distributed_signoz_index_v3
WHERE
    timestamp >= toDateTime64(1234567890/1e9, 9)
    AND timestamp <= toDateTime64(1234567899/1e9, 9)
    AND ts_bucket_start >= toDateTime64(1234567890/1e9, 9)
    AND ts_bucket_start <= toDateTime64(1234567899/1e9, 9)
    AND resource_string_service$$name = 'api'
GROUP BY resource_string_service$$name
```

**Note**: The query uses `ts_bucket_start`, the partitioning column, for efficient time filtering.

---

## Storage Schema

### Main Tables

**Location**: `pkg/telemetrytraces/tables.go`

#### 1. `distributed_signoz_index_v3`

The main span index table; stores all span data.

**Key Columns**:

- `timestamp`: Span timestamp
- `duration_nano`: Span duration
- `span_id`, `trace_id`: Identifiers
- `has_error`: Error indicator
- `kind`: Span kind
- `name`: Operation name
- `attributes_string`, `attributes_number`, `attributes_bool`: Attributes
- `resources_string`: Resource attributes
- `events`: Span events
- `status_code_string`, `status_message`: Status
- `ts_bucket_start`: Time bucket for partitioning

#### 2. `distributed_trace_summary`

Trace-level summary for quick lookups.

**Columns**:

- `trace_id`: Trace identifier
- `start`: Earliest span timestamp
- `end`: Latest span timestamp
- `num_spans`: Total span count

#### 3. `distributed_tag_attributes_v2`

Metadata table for attribute keys.

**Purpose**: Stores available attribute keys for autocomplete

#### 4. `distributed_span_attributes_keys`

Span attribute keys metadata.

**Purpose**: Tracks which attributes exist in spans

### Database

All trace tables live in the `signoz_traces` database.

---

## Extending the Traces Module

### Adding a New Calculated Field

1. **Define Field in Constants** (`pkg/telemetrytraces/const.go`):

```go
CalculatedFields = map[string]telemetrytypes.TelemetryFieldKey{
	"my_new_field": {
		Name:          "my_new_field",
		Description:   "Description of the field",
		Signal:        telemetrytypes.SignalTraces,
		FieldContext:  telemetrytypes.FieldContextSpan,
		FieldDataType: telemetrytypes.FieldDataTypeString,
	},
}
```

2. **Implement Field Mapping** (`pkg/telemetrytraces/field_mapper.go`):

```go
func (fm *fieldMapper) MapField(field telemetrytypes.TelemetryFieldKey) (string, error) {
	if field.Name == "my_new_field" {
		// Return the ClickHouse expression for this field
		return "attributes_string['my.attribute.key']", nil
	}
	// ... existing mappings
}
```

3. **Update Condition Builder** (if needed for filtering):

```go
// In condition_builder.go, add support for your field
```

### Adding a New API Endpoint

1. **Add Handler Method** (`pkg/query-service/app/http_handler.go`):

```go
func (aH *APIHandler) MyNewTraceHandler(w http.ResponseWriter, r *http.Request) {
	// Extract parameters
	// Call the reader or querier
	// Return the response
}
```

2. **Register Route** (in `RegisterRoutes` or a separate method):

```go
router.HandleFunc("/api/v2/traces/my-endpoint",
	am.ViewAccess(aH.MyNewTraceHandler)).Methods(http.MethodPost)
```

3. **Implement Logic**:
   - Add to `ClickHouseReader` if direct DB access is needed
   - Or use `Querier` for query builder queries

### Adding a New Aggregation Function

1. **Update Aggregation Rewriter** (`pkg/querybuilder/agg_expr_rewriter.go`):

```go
func (r *aggExprRewriter) RewriteAggregation(expr string) (string, error) {
	// Add parsing for your function
	if strings.HasPrefix(expr, "my_function(") {
		// Return the ClickHouse SQL expression
		return "myClickHouseFunction(...)", nil
	}
	// ... fall through to the existing rewrites
}
```

2. **Update Statement Builder** (if special handling is needed):

```go
// In statement_builder.go, add a special case if needed
```

### Adding Trace Operator Support

Trace operators are already extensible. To add a new operator:

1. **Update Grammar** (`grammar/TraceOperatorGrammar.g4`):

```antlr
operator: AND | OR | NOT | MY_NEW_OPERATOR;
```

2. **Update CTE Builder** (`pkg/telemetrytraces/trace_operator_cte_builder.go`):

```go
func (b *traceOperatorCTEBuilder) buildOperatorQuery(op TraceOperatorType) string {
	switch op {
	case TraceOperatorTypeMyNewOperator:
		return "MY_CLICKHOUSE_OPERATION"
	}
}
```

---

## Common Patterns

### Pattern 1: Query with Filter

```go
query := qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
	Name:   "filtered_traces",
	Signal: telemetrytypes.SignalTraces,
	Filter: &qbtypes.Filter{
		Expression: "service.name = 'api' AND duration_nano > 1000000",
	},
	Aggregations: []qbtypes.TraceAggregation{
		{Expression: "count()", Alias: "total"},
	},
}
```

### Pattern 2: Time Series Query

```go
query := qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
	Name:   "time_series",
	Signal: telemetrytypes.SignalTraces,
	Aggregations: []qbtypes.TraceAggregation{
		{Expression: "avg(duration_nano)", Alias: "avg_duration"},
	},
	GroupBy: []qbtypes.GroupByKey{
		{TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
			Name:         "service.name",
			FieldContext: telemetrytypes.FieldContextResource,
		}},
	},
	StepInterval: qbtypes.Step{Duration: time.Minute},
}
```

### Pattern 3: Trace Operator Query

```go
query := qbtypes.QueryBuilderTraceOperator{
	Name:       "operator_query",
	Expression: "A AND B", // A and B are query names
	Filter: &qbtypes.Filter{
		Expression: "duration_nano > 5000000",
	},
}
```

---

## Performance Considerations

### Caching

- **Trace Detail Views**: 5-minute cache for waterfall/flamegraph
- **Query Results**: Bucket-based caching in the querier
- **Metadata**: Cached attribute keys and field metadata

### Query Optimization

1. **Time Range Optimization**: When `trace_id` is in the filter, query `trace_summary` first to narrow the time range
2. **Index Usage**: Queries use `ts_bucket_start` for time filtering
3. **Limit Enforcement**: Waterfall and flamegraph enforce limits (a 500-span window and 50 levels, respectively)

### Best Practices

1. **Use the Query Builder**: Prefer the query builder over raw SQL for better optimization
2. **Limit Time Ranges**: Always specify reasonable time ranges
3. **Use Aggregations**: For large datasets, use aggregations instead of raw data
4. **Cache Awareness**: Be mindful of cache TTLs when testing

---

## References

### Key Files

- `pkg/telemetrytraces/` - Core trace query building
  - `statement_builder.go` - Trace SQL generation
  - `field_mapper.go` - Trace field mapping
  - `condition_builder.go` - Trace filter building
  - `trace_operator_statement_builder.go` - Trace operator SQL
- `pkg/query-service/app/clickhouseReader/reader.go` - Direct trace access
- `pkg/query-service/app/http_handler.go` - API handlers
- `pkg/query-service/model/trace.go` - Data models

### Related Documentation

- [Query Range API Documentation](./QUERY_RANGE_API.md) - Common query_range API details
- [OpenTelemetry Specification](https://opentelemetry.io/docs/specs/)
- [ClickHouse Documentation](https://clickhouse.com/docs)
- [Query Builder Guide](../contributing/go/query-builder.md)

---

## Contributing

When contributing to the traces module:

1. **Follow Existing Patterns**: Match the style of the existing code
2. **Add Tests**: Include unit tests for new functionality
3. **Update Documentation**: Update this doc for significant changes
4. **Consider Performance**: Optimize queries and use caching appropriately
5. **Handle Errors**: Provide meaningful error messages

For questions or help, reach out to the maintainers or open an issue.

@@ -103,9 +103,19 @@ Remember to replace the region and ingestion key with proper values as obtained

Both SigNoz and the OTel demo app (the frontend-proxy service, to be precise) default to the same port, 8080. To prevent the port allocation conflict, modify the OTel demo application config to use port 8081 as the `ENVOY_PORT` value as shown below, and run the docker compose command.

Also, both SigNoz and the OTel demo app configure the same `PROMETHEUS_PORT`; by default both try to bind `9090`, which may cause either of them to fail depending on which one acquires the port first. To prevent this, we need to modify the value of `PROMETHEUS_PORT` too.

```sh
# Previously just: ENVOY_PORT=8081 docker compose up -d
ENVOY_PORT=8081 PROMETHEUS_PORT=9091 docker compose up -d
```

Alternatively, we can set these values in the `.env` file, which reduces the command to just:

```sh
docker compose up -d
```

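For the `.env` route, the relevant overrides would look like the following; this is a sketch, assuming `ENVOY_PORT` and `PROMETHEUS_PORT` are the variables the demo's compose file reads, per the discussion above:

```sh
# .env (in the OTel demo repo root)
ENVOY_PORT=8081
PROMETHEUS_PORT=9091
```
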
This spins up multiple microservices with OpenTelemetry instrumentation enabled. You can verify this by running:

```sh
docker compose ps -a
```

```diff
@@ -232,7 +232,7 @@ func (p *BaseSeasonalProvider) getPredictedSeries(
 // moving avg of the previous period series + z score threshold * std dev of the series
 // moving avg of the previous period series - z score threshold * std dev of the series
 func (p *BaseSeasonalProvider) getBounds(
-	series, predictedSeries *qbtypes.TimeSeries,
+	series, predictedSeries, weekSeries *qbtypes.TimeSeries,
 	zScoreThreshold float64,
 ) (*qbtypes.TimeSeries, *qbtypes.TimeSeries) {
 	upperBoundSeries := &qbtypes.TimeSeries{
@@ -246,8 +246,8 @@ func (p *BaseSeasonalProvider) getBounds(
 	}

 	for idx, curr := range series.Values {
-		upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
-		lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
+		upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(weekSeries)
+		lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(weekSeries)
 		upperBoundSeries.Values = append(upperBoundSeries.Values, &qbtypes.TimeSeriesValue{
 			Timestamp: curr.Timestamp,
 			Value:     upperBound,
@@ -398,8 +398,6 @@ func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UU
 	aggOfInterest := result.Aggregations[0]

 	for _, series := range aggOfInterest.Series {
-		stdDev := p.getStdDev(series)
-		p.logger.InfoContext(ctx, "calculated standard deviation for series", "anomaly_std_dev", stdDev, "anomaly_labels", series.Labels)

 		pastPeriodSeries := p.getMatchingSeries(ctx, pastPeriodResult, series)
 		currentSeasonSeries := p.getMatchingSeries(ctx, currentSeasonResult, series)
@@ -407,6 +405,9 @@ func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UU
 		past2SeasonSeries := p.getMatchingSeries(ctx, past2SeasonResult, series)
 		past3SeasonSeries := p.getMatchingSeries(ctx, past3SeasonResult, series)

+		stdDev := p.getStdDev(currentSeasonSeries)
+		p.logger.InfoContext(ctx, "calculated standard deviation for series", "anomaly_std_dev", stdDev, "anomaly_labels", series.Labels)
+
 		prevSeriesAvg := p.getAvg(pastPeriodSeries)
 		currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
 		pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
@@ -435,6 +436,7 @@ func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UU
 		upperBoundSeries, lowerBoundSeries := p.getBounds(
 			series,
 			predictedSeries,
+			currentSeasonSeries,
 			zScoreThreshold,
 		)
 		aggOfInterest.UpperBoundSeries = append(aggOfInterest.UpperBoundSeries, upperBoundSeries)
```

**New file**: `ee/authn/callbackauthn/oidccallbackauthn/authn.go` (240 lines)

```go
package oidccallbackauthn

import (
	"context"
	"fmt"
	"net/url"

	"github.com/SigNoz/signoz/pkg/authn"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/http/client"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/coreos/go-oidc/v3/oidc"
	"golang.org/x/oauth2"
)

const (
	redirectPath string = "/api/v1/complete/oidc"
)

var defaultScopes []string = []string{"email", "profile", oidc.ScopeOpenID}

var _ authn.CallbackAuthN = (*AuthN)(nil)

type AuthN struct {
	settings   factory.ScopedProviderSettings
	store      authtypes.AuthNStore
	licensing  licensing.Licensing
	httpClient *client.Client
}

func New(store authtypes.AuthNStore, licensing licensing.Licensing, providerSettings factory.ProviderSettings) (*AuthN, error) {
	settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/authn/callbackauthn/oidccallbackauthn")

	httpClient, err := client.New(providerSettings.Logger, providerSettings.TracerProvider, providerSettings.MeterProvider)
	if err != nil {
		return nil, err
	}

	return &AuthN{
		settings:   settings,
		store:      store,
		licensing:  licensing,
		httpClient: httpClient,
	}, nil
}

func (a *AuthN) LoginURL(ctx context.Context, siteURL *url.URL, authDomain *authtypes.AuthDomain) (string, error) {
	if authDomain.AuthDomainConfig().AuthNProvider != authtypes.AuthNProviderOIDC {
		return "", errors.Newf(errors.TypeInternal, authtypes.ErrCodeAuthDomainMismatch, "domain type is not oidc")
	}

	_, oauth2Config, err := a.oidcProviderAndoauth2Config(ctx, siteURL, authDomain)
	if err != nil {
		return "", err
	}

	return oauth2Config.AuthCodeURL(authtypes.NewState(siteURL, authDomain.StorableAuthDomain().ID).URL.String()), nil
}

func (a *AuthN) HandleCallback(ctx context.Context, query url.Values) (*authtypes.CallbackIdentity, error) {
	if err := query.Get("error"); err != "" {
		return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "oidc: error while authenticating").WithAdditional(query.Get("error_description"))
	}

	state, err := authtypes.NewStateFromString(query.Get("state"))
	if err != nil {
		return nil, errors.Newf(errors.TypeInvalidInput, authtypes.ErrCodeInvalidState, "oidc: invalid state").WithAdditional(err.Error())
	}

	authDomain, err := a.store.GetAuthDomainFromID(ctx, state.DomainID)
	if err != nil {
		return nil, err
	}

	_, err = a.licensing.GetActive(ctx, authDomain.StorableAuthDomain().OrgID)
	if err != nil {
		return nil, errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
	}

	oidcProvider, oauth2Config, err := a.oidcProviderAndoauth2Config(ctx, state.URL, authDomain)
	if err != nil {
		return nil, err
	}

	ctx = context.WithValue(ctx, oauth2.HTTPClient, a.httpClient.Client())
	token, err := oauth2Config.Exchange(ctx, query.Get("code"))
	if err != nil {
		var retrieveError *oauth2.RetrieveError
		if errors.As(err, &retrieveError) {
			return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "oidc: failed to get token").WithAdditional(retrieveError.ErrorDescription).WithAdditional(string(retrieveError.Body))
		}

		return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "oidc: failed to get token").WithAdditional(err.Error())
	}

	claims, err := a.claimsFromIDToken(ctx, authDomain, oidcProvider, token)
	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
		return nil, err
	}

	if claims == nil && authDomain.AuthDomainConfig().OIDC.GetUserInfo {
		claims, err = a.claimsFromUserInfo(ctx, oidcProvider, token)
		if err != nil {
			return nil, err
		}
	}

	emailClaim, ok := claims[authDomain.AuthDomainConfig().OIDC.ClaimMapping.Email].(string)
	if !ok {
		return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "oidc: missing email in claims")
	}

	email, err := valuer.NewEmail(emailClaim)
	if err != nil {
		return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "oidc: failed to parse email").WithAdditional(err.Error())
	}

	if !authDomain.AuthDomainConfig().OIDC.InsecureSkipEmailVerified {
		emailVerifiedClaim, ok := claims["email_verified"].(bool)
		if !ok {
			return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "oidc: missing email_verified in claims")
		}

		if !emailVerifiedClaim {
			return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, "oidc: email is not verified")
		}
	}

	name := ""
	if nameClaim := authDomain.AuthDomainConfig().OIDC.ClaimMapping.Name; nameClaim != "" {
		if n, ok := claims[nameClaim].(string); ok {
			name = n
		}
	}

	var groups []string
	if groupsClaim := authDomain.AuthDomainConfig().OIDC.ClaimMapping.Groups; groupsClaim != "" {
		if claimValue, exists := claims[groupsClaim]; exists {
			switch g := claimValue.(type) {
			case []any:
				for _, group := range g {
					if gs, ok := group.(string); ok {
						groups = append(groups, gs)
					}
				}
			case string:
				// Some IDPs return a single group as a string instead of an array
				groups = append(groups, g)
			default:
				a.settings.Logger().WarnContext(ctx, "oidc: unsupported groups type", "type", fmt.Sprintf("%T", claimValue))
			}
		}
	}

	role := ""
	if roleClaim := authDomain.AuthDomainConfig().OIDC.ClaimMapping.Role; roleClaim != "" {
		if r, ok := claims[roleClaim].(string); ok {
			role = r
		}
	}

	return authtypes.NewCallbackIdentity(name, email, authDomain.StorableAuthDomain().OrgID, state, groups, role), nil
}

func (a *AuthN) ProviderInfo(ctx context.Context, authDomain *authtypes.AuthDomain) *authtypes.AuthNProviderInfo {
	return &authtypes.AuthNProviderInfo{
		RelayStatePath: nil,
	}
}

func (a *AuthN) oidcProviderAndoauth2Config(ctx context.Context, siteURL *url.URL, authDomain *authtypes.AuthDomain) (*oidc.Provider, *oauth2.Config, error) {
	if authDomain.AuthDomainConfig().OIDC.IssuerAlias != "" {
		ctx = oidc.InsecureIssuerURLContext(ctx, authDomain.AuthDomainConfig().OIDC.IssuerAlias)
	}

	oidcProvider, err := oidc.NewProvider(ctx, authDomain.AuthDomainConfig().OIDC.Issuer)
	if err != nil {
		return nil, nil, err
	}

	scopes := make([]string, len(defaultScopes))
	copy(scopes, defaultScopes)

	if authDomain.AuthDomainConfig().RoleMapping != nil && len(authDomain.AuthDomainConfig().RoleMapping.GroupMappings) > 0 {
		scopes = append(scopes, "groups")
	}

	return oidcProvider, &oauth2.Config{
		ClientID:     authDomain.AuthDomainConfig().OIDC.ClientID,
		ClientSecret: authDomain.AuthDomainConfig().OIDC.ClientSecret,
		Endpoint:     oidcProvider.Endpoint(),
		Scopes:       scopes,
		RedirectURL: (&url.URL{
			Scheme: siteURL.Scheme,
			Host:   siteURL.Host,
			Path:   redirectPath,
		}).String(),
	}, nil
}

func (a *AuthN) claimsFromIDToken(ctx context.Context, authDomain *authtypes.AuthDomain, provider *oidc.Provider, token *oauth2.Token) (map[string]any, error) {
	rawIDToken, ok := token.Extra("id_token").(string)
	if !ok {
		return nil, errors.New(errors.TypeNotFound, errors.CodeNotFound, "oidc: no id_token in token response")
	}

	verifier := provider.Verifier(&oidc.Config{ClientID: authDomain.AuthDomainConfig().OIDC.ClientID})
	idToken, err := verifier.Verify(ctx, rawIDToken)
	if err != nil {
		return nil, errors.Newf(errors.TypeForbidden, errors.CodeForbidden, "oidc: failed to verify token").WithAdditional(err.Error())
	}

	var claims map[string]any
	if err := idToken.Claims(&claims); err != nil {
		return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "oidc: failed to decode claims").WithAdditional(err.Error())
	}

	return claims, nil
}

func (a *AuthN) claimsFromUserInfo(ctx context.Context, provider *oidc.Provider, token *oauth2.Token) (map[string]any, error) {
	var claims map[string]any

	userInfo, err := provider.UserInfo(ctx, oauth2.StaticTokenSource(&oauth2.Token{
		AccessToken: token.AccessToken,
		TokenType:   "Bearer", // The UserInfo endpoint requires a bearer token as per RFC6750
	}))
	if err != nil {
		return nil, errors.Newf(errors.TypeInternal, errors.CodeInternal, "oidc: failed to get user info").WithAdditional(err.Error())
	}

	if err := userInfo.Claims(&claims); err != nil {
		return nil, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "oidc: failed to decode claims").WithAdditional(err.Error())
	}

	return claims, nil
}
```

**New file**: `ee/authn/callbackauthn/samlcallbackauthn/authn.go` (182 lines)

```go
package samlcallbackauthn

import (
	"context"
	"crypto/x509"
	"encoding/base64"
	"encoding/pem"
	"net/url"
	"strings"

	"github.com/SigNoz/signoz/pkg/authn"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	saml2 "github.com/russellhaering/gosaml2"
	dsig "github.com/russellhaering/goxmldsig"
)

const (
	redirectPath string = "/api/v1/complete/saml"
)

var _ authn.CallbackAuthN = (*AuthN)(nil)

type AuthN struct {
	store     authtypes.AuthNStore
	licensing licensing.Licensing
}

func New(ctx context.Context, store authtypes.AuthNStore, licensing licensing.Licensing) (*AuthN, error) {
	return &AuthN{
		store:     store,
		licensing: licensing,
	}, nil
}

func (a *AuthN) LoginURL(ctx context.Context, siteURL *url.URL, authDomain *authtypes.AuthDomain) (string, error) {
	if authDomain.AuthDomainConfig().AuthNProvider != authtypes.AuthNProviderSAML {
		return "", errors.Newf(errors.TypeInternal, authtypes.ErrCodeAuthDomainMismatch, "saml: domain type is not saml")
	}

	sp, err := a.serviceProvider(siteURL, authDomain)
	if err != nil {
		return "", err
	}

	url, err := sp.BuildAuthURL(authtypes.NewState(siteURL, authDomain.StorableAuthDomain().ID).URL.String())
	if err != nil {
		return "", err
	}

	return url, nil
}

func (a *AuthN) HandleCallback(ctx context.Context, formValues url.Values) (*authtypes.CallbackIdentity, error) {
	state, err := authtypes.NewStateFromString(formValues.Get("RelayState"))
	if err != nil {
		return nil, errors.New(errors.TypeInvalidInput, authtypes.ErrCodeInvalidState, "saml: invalid state").WithAdditional(err.Error())
	}

	authDomain, err := a.store.GetAuthDomainFromID(ctx, state.DomainID)
	if err != nil {
		return nil, err
	}

	_, err = a.licensing.GetActive(ctx, authDomain.StorableAuthDomain().OrgID)
	if err != nil {
		return nil, errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
	}

	sp, err := a.serviceProvider(state.URL, authDomain)
	if err != nil {
		return nil, err
	}

	assertionInfo, err := sp.RetrieveAssertionInfo(formValues.Get("SAMLResponse"))
	if err != nil {
		if errors.As(err, &saml2.ErrVerification{}) {
			return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, err.Error())
		}

		if errors.As(err, &saml2.ErrMissingElement{}) {
			return nil, errors.New(errors.TypeNotFound, errors.CodeNotFound, err.Error())
		}

		return nil, err
	}

	if assertionInfo.WarningInfo.InvalidTime {
		return nil, errors.New(errors.TypeForbidden, errors.CodeForbidden, "saml: expired saml response")
	}

	email, err := valuer.NewEmail(assertionInfo.NameID)
	if err != nil {
		return nil, errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "saml: invalid email").WithAdditional("The nameID assertion is used to retrieve the email address, please check your IDP configuration and try again.")
	}

	name := ""
	if nameAttribute := authDomain.AuthDomainConfig().SAML.AttributeMapping.Name; nameAttribute != "" {
		if val := assertionInfo.Values.Get(nameAttribute); val != "" {
			name = val
		}
	}

	var groups []string
	if groupAttribute := authDomain.AuthDomainConfig().SAML.AttributeMapping.Groups; groupAttribute != "" {
		groups = assertionInfo.Values.GetAll(groupAttribute)
	}

	role := ""
	if roleAttribute := authDomain.AuthDomainConfig().SAML.AttributeMapping.Role; roleAttribute != "" {
		if val := assertionInfo.Values.Get(roleAttribute); val != "" {
			role = val
		}
	}

	return authtypes.NewCallbackIdentity(name, email, authDomain.StorableAuthDomain().OrgID, state, groups, role), nil
}

func (a *AuthN) ProviderInfo(ctx context.Context, authDomain *authtypes.AuthDomain) *authtypes.AuthNProviderInfo {
	state := authtypes.NewState(&url.URL{Path: "login"}, authDomain.StorableAuthDomain().ID).URL.String()

	return &authtypes.AuthNProviderInfo{
		RelayStatePath: &state,
	}
}

func (a *AuthN) serviceProvider(siteURL *url.URL, authDomain *authtypes.AuthDomain) (*saml2.SAMLServiceProvider, error) {
	certStore, err := a.getCertificateStore(authDomain)
	if err != nil {
		return nil, err
	}

	acsURL := &url.URL{Scheme: siteURL.Scheme, Host: siteURL.Host, Path: redirectPath}

	// Note:
	// The ServiceProviderIssuer is the client id in case of keycloak. Since we set it to the host here, we need to set the client id == host in keycloak.
	// For AWSSSO, this is the value of Application SAML audience.
	return &saml2.SAMLServiceProvider{
		IdentityProviderSSOURL:      authDomain.AuthDomainConfig().SAML.SamlIdp,
		IdentityProviderIssuer:      authDomain.AuthDomainConfig().SAML.SamlEntity,
		ServiceProviderIssuer:       siteURL.Host,
		AssertionConsumerServiceURL: acsURL.String(),
		SignAuthnRequests:           !authDomain.AuthDomainConfig().SAML.InsecureSkipAuthNRequestsSigned,
		AllowMissingAttributes:      true,
		IDPCertificateStore:         certStore,
		SPKeyStore:                  dsig.RandomKeyStoreForTest(),
	}, nil
}

func (a *AuthN) getCertificateStore(authDomain *authtypes.AuthDomain) (dsig.X509CertificateStore, error) {
	certStore := &dsig.MemoryX509CertificateStore{
		Roots: []*x509.Certificate{},
	}

	var certBytes []byte
	if strings.Contains(authDomain.AuthDomainConfig().SAML.SamlCert, "-----BEGIN CERTIFICATE-----") {
		block, _ := pem.Decode([]byte(authDomain.AuthDomainConfig().SAML.SamlCert))
		if block == nil {
			return certStore, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "no valid pem cert found")
		}

		certBytes = block.Bytes
	} else {
		certData, err := base64.StdEncoding.DecodeString(authDomain.AuthDomainConfig().SAML.SamlCert)
		if err != nil {
			return certStore, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to read certificate: %s", err.Error())
		}

		certBytes = certData
	}

	idpCert, err := x509.ParseCertificate(certBytes)
	if err != nil {
		return certStore, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "failed to prepare saml request, invalid cert: %s", err.Error())
	}

	certStore.Roots = append(certStore.Roots, idpCert)

	return certStore, nil
}
```

**New file**: `ee/authz/openfgaauthz/provider.go` (98 lines)

```go
package openfgaauthz

import (
	"context"

	"github.com/SigNoz/signoz/pkg/authz"
	pkgopenfgaauthz "github.com/SigNoz/signoz/pkg/authz/openfgaauthz"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	openfgav1 "github.com/openfga/api/proto/openfga/v1"
	openfgapkgtransformer "github.com/openfga/language/pkg/go/transformer"
)

type provider struct {
	pkgAuthzService authz.AuthZ
}

func NewProviderFactory(sqlstore sqlstore.SQLStore, openfgaSchema []openfgapkgtransformer.ModuleFile) factory.ProviderFactory[authz.AuthZ, authz.Config] {
	return factory.NewProviderFactory(factory.MustNewName("openfga"), func(ctx context.Context, ps factory.ProviderSettings, config authz.Config) (authz.AuthZ, error) {
		return newOpenfgaProvider(ctx, ps, config, sqlstore, openfgaSchema)
	})
}

func newOpenfgaProvider(ctx context.Context, settings factory.ProviderSettings, config authz.Config, sqlstore sqlstore.SQLStore, openfgaSchema []openfgapkgtransformer.ModuleFile) (authz.AuthZ, error) {
	pkgOpenfgaAuthzProvider := pkgopenfgaauthz.NewProviderFactory(sqlstore, openfgaSchema)
	pkgAuthzService, err := pkgOpenfgaAuthzProvider.New(ctx, settings, config)
	if err != nil {
		return nil, err
	}

	return &provider{
		pkgAuthzService: pkgAuthzService,
	}, nil
}

func (provider *provider) Start(ctx context.Context) error {
	return provider.pkgAuthzService.Start(ctx)
}

func (provider *provider) Stop(ctx context.Context) error {
	return provider.pkgAuthzService.Stop(ctx)
}

func (provider *provider) Check(ctx context.Context, tuple *openfgav1.TupleKey) error {
	return provider.pkgAuthzService.Check(ctx, tuple)
}

func (provider *provider) CheckWithTupleCreation(ctx context.Context, claims authtypes.Claims, orgID valuer.UUID, relation authtypes.Relation, _ authtypes.Relation, typeable authtypes.Typeable, selectors []authtypes.Selector) error {
	subject, err := authtypes.NewSubject(authtypes.TypeableUser, claims.UserID, orgID, nil)
	if err != nil {
		return err
	}

	tuples, err := typeable.Tuples(subject, relation, selectors, orgID)
	if err != nil {
		return err
	}

	err = provider.BatchCheck(ctx, tuples)
	if err != nil {
		return err
	}

	return nil
}

func (provider *provider) CheckWithTupleCreationWithoutClaims(ctx context.Context, orgID valuer.UUID, relation authtypes.Relation, _ authtypes.Relation, typeable authtypes.Typeable, selectors []authtypes.Selector) error {
	subject, err := authtypes.NewSubject(authtypes.TypeableAnonymous, authtypes.AnonymousUser.String(), orgID, nil)
	if err != nil {
		return err
	}

	tuples, err := typeable.Tuples(subject, relation, selectors, orgID)
	if err != nil {
		return err
	}

	err = provider.BatchCheck(ctx, tuples)
	if err != nil {
		return err
	}

	return nil
}

func (provider *provider) BatchCheck(ctx context.Context, tuples []*openfgav1.TupleKey) error {
	return provider.pkgAuthzService.BatchCheck(ctx, tuples)
}

func (provider *provider) ListObjects(ctx context.Context, subject string, relation authtypes.Relation, typeable authtypes.Typeable) ([]*authtypes.Object, error) {
	return provider.pkgAuthzService.ListObjects(ctx, subject, relation, typeable)
}

func (provider *provider) Write(ctx context.Context, additions []*openfgav1.TupleKey, deletions []*openfgav1.TupleKey) error {
	return provider.pkgAuthzService.Write(ctx, additions, deletions)
}
```

```diff
@@ -15,18 +15,18 @@ type anonymous

 type role
   relations
-    define assignee: [user]
+    define assignee: [user, anonymous]

     define read: [user, role#assignee]
     define update: [user, role#assignee]
     define delete: [user, role#assignee]

-type resources
+type metaresources
   relations
     define create: [user, role#assignee]
     define list: [user, role#assignee]

-type resource
+type metaresource
   relations
     define read: [user, anonymous, role#assignee]
     define update: [user, role#assignee]
@@ -35,6 +35,6 @@ type resource
     define block: [user, role#assignee]


-type telemetry
+type telemetryresource
   relations
-    define read: [user, anonymous, role#assignee]
+    define read: [user, role#assignee]
```

282
ee/gateway/httpgateway/provider.go
Normal file
@@ -0,0 +1,282 @@
|
||||
package httpgateway
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/errors"
|
||||
"github.com/SigNoz/signoz/pkg/factory"
|
||||
"github.com/SigNoz/signoz/pkg/gateway"
|
||||
"github.com/SigNoz/signoz/pkg/http/client"
|
||||
"github.com/SigNoz/signoz/pkg/licensing"
|
||||
"github.com/SigNoz/signoz/pkg/types/gatewaytypes"
|
||||
"github.com/SigNoz/signoz/pkg/valuer"
|
||||
"github.com/tidwall/gjson"
|
||||
)
|
||||
|
||||
type Provider struct {
|
||||
settings factory.ScopedProviderSettings
|
||||
config gateway.Config
|
||||
httpClient *client.Client
|
||||
licensing licensing.Licensing
|
||||
}
|
||||
|
||||
func NewProviderFactory(licensing licensing.Licensing) factory.ProviderFactory[gateway.Gateway, gateway.Config] {
|
||||
return factory.NewProviderFactory(factory.MustNewName("http"), func(ctx context.Context, ps factory.ProviderSettings, c gateway.Config) (gateway.Gateway, error) {
|
||||
return New(ctx, ps, c, licensing)
|
||||
})
|
||||
}
|
||||
|
||||
func New(ctx context.Context, providerSettings factory.ProviderSettings, config gateway.Config, licensing licensing.Licensing) (gateway.Gateway, error) {
|
||||
settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/ee/gateway/httpgateway")
|
||||
|
||||
httpClient, err := client.New(
|
||||
settings.Logger(),
|
||||
providerSettings.TracerProvider,
|
||||
providerSettings.MeterProvider,
|
||||
client.WithRequestResponseLog(true),
|
||||
client.WithRetryCount(3),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Provider{
|
||||
settings: settings,
|
||||
config: config,
|
||||
httpClient: httpClient,
|
||||
licensing: licensing,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (provider *Provider) GetIngestionKeys(ctx context.Context, orgID valuer.UUID, page, perPage int) (*gatewaytypes.GettableIngestionKeys, error) {
|
||||
qParams := url.Values{}
|
||||
qParams.Add("page", strconv.Itoa(page))
|
||||
qParams.Add("per_page", strconv.Itoa(perPage))
|
||||
|
||||
responseBody, err := provider.do(ctx, orgID, http.MethodGet, "/v1/workspaces/me/keys", qParams, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ingestionKeys []gatewaytypes.IngestionKey
|
||||
if err := json.Unmarshal([]byte(gjson.GetBytes(responseBody, "data").String()), &ingestionKeys); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pagination gatewaytypes.Pagination
|
||||
if err := json.Unmarshal([]byte(gjson.GetBytes(responseBody, "_pagination").String()), &pagination); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &gatewaytypes.GettableIngestionKeys{
|
||||
Keys: ingestionKeys,
|
||||
Pagination: pagination,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (provider *Provider) SearchIngestionKeysByName(ctx context.Context, orgID valuer.UUID, name string, page, perPage int) (*gatewaytypes.GettableIngestionKeys, error) {
|
||||
qParams := url.Values{}
|
||||
qParams.Add("name", name)
|
||||
qParams.Add("page", strconv.Itoa(page))
|
||||
qParams.Add("per_page", strconv.Itoa(perPage))
|
||||
|
||||
responseBody, err := provider.do(ctx, orgID, http.MethodGet, "/v1/workspaces/me/keys/search", qParams, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var ingestionKeys []gatewaytypes.IngestionKey
|
||||
if err := json.Unmarshal([]byte(gjson.GetBytes(responseBody, "data").String()), &ingestionKeys); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pagination gatewaytypes.Pagination
|
||||
if err := json.Unmarshal([]byte(gjson.GetBytes(responseBody, "_pagination").String()), &pagination); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &gatewaytypes.GettableIngestionKeys{
|
||||
Keys: ingestionKeys,
|
||||
Pagination: pagination,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (provider *Provider) CreateIngestionKey(ctx context.Context, orgID valuer.UUID, name string, tags []string, expiresAt time.Time) (*gatewaytypes.GettableCreatedIngestionKey, error) {
|
||||
requestBody := gatewaytypes.PostableIngestionKey{
|
||||
Name: name,
|
||||
Tags: tags,
|
||||
ExpiresAt: expiresAt,
|
||||
}
|
||||
requestBodyBytes, err := json.Marshal(requestBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
responseBody, err := provider.do(ctx, orgID, http.MethodPost, "/v1/workspaces/me/keys", nil, requestBodyBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var createdKeyResponse gatewaytypes.GettableCreatedIngestionKey
|
||||
	if err := json.Unmarshal([]byte(gjson.GetBytes(responseBody, "data").String()), &createdKeyResponse); err != nil {
		return nil, err
	}

	return &createdKeyResponse, nil
}

func (provider *Provider) UpdateIngestionKey(ctx context.Context, orgID valuer.UUID, keyID string, name string, tags []string, expiresAt time.Time) error {
	requestBody := gatewaytypes.PostableIngestionKey{
		Name: name,
		Tags: tags,
		ExpiresAt: expiresAt,
	}
	requestBodyBytes, err := json.Marshal(requestBody)
	if err != nil {
		return err
	}

	_, err = provider.do(ctx, orgID, http.MethodPatch, "/v1/workspaces/me/keys/"+keyID, nil, requestBodyBytes)
	if err != nil {
		return err
	}

	return nil
}

func (provider *Provider) DeleteIngestionKey(ctx context.Context, orgID valuer.UUID, keyID string) error {
	_, err := provider.do(ctx, orgID, http.MethodDelete, "/v1/workspaces/me/keys/"+keyID, nil, nil)
	if err != nil {
		return err
	}

	return nil
}

func (provider *Provider) CreateIngestionKeyLimit(ctx context.Context, orgID valuer.UUID, keyID string, signal string, limitConfig gatewaytypes.LimitConfig, tags []string) (*gatewaytypes.GettableCreatedIngestionKeyLimit, error) {
	requestBody := gatewaytypes.PostableIngestionKeyLimit{
		Signal: signal,
		Config: limitConfig,
		Tags: tags,
	}
	requestBodyBytes, err := json.Marshal(requestBody)
	if err != nil {
		return nil, err
	}

	responseBody, err := provider.do(ctx, orgID, http.MethodPost, "/v1/workspaces/me/keys/"+keyID+"/limits", nil, requestBodyBytes)
	if err != nil {
		return nil, err
	}

	var createdIngestionKeyLimitResponse gatewaytypes.GettableCreatedIngestionKeyLimit
	if err := json.Unmarshal([]byte(gjson.GetBytes(responseBody, "data").String()), &createdIngestionKeyLimitResponse); err != nil {
		return nil, err
	}

	return &createdIngestionKeyLimitResponse, nil
}

func (provider *Provider) UpdateIngestionKeyLimit(ctx context.Context, orgID valuer.UUID, limitID string, limitConfig gatewaytypes.LimitConfig, tags []string) error {
	requestBody := gatewaytypes.UpdatableIngestionKeyLimit{
		Config: limitConfig,
		Tags: tags,
	}
	requestBodyBytes, err := json.Marshal(requestBody)
	if err != nil {
		return err
	}

	_, err = provider.do(ctx, orgID, http.MethodPatch, "/v1/workspaces/me/limits/"+limitID, nil, requestBodyBytes)
	if err != nil {
		return err
	}

	return nil
}

func (provider *Provider) DeleteIngestionKeyLimit(ctx context.Context, orgID valuer.UUID, limitID string) error {
	_, err := provider.do(ctx, orgID, http.MethodDelete, "/v1/workspaces/me/limits/"+limitID, nil, nil)
	if err != nil {
		return err
	}

	return nil
}

func (provider *Provider) do(ctx context.Context, orgID valuer.UUID, method string, path string, queryParams url.Values, body []byte) ([]byte, error) {
	license, err := provider.licensing.GetActive(ctx, orgID)
	if err != nil {
		return nil, errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "no valid license found").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
	}

	// build url
	requestURL := provider.config.URL.JoinPath(path)

	// add query params to the url
	if queryParams != nil {
		requestURL.RawQuery = queryParams.Encode()
	}

	// build request
	request, err := http.NewRequestWithContext(ctx, method, requestURL.String(), bytes.NewBuffer(body))
	if err != nil {
		return nil, err
	}

	// add headers needed to call gateway
	request.Header.Set("Content-Type", "application/json")
	request.Header.Set("X-Signoz-Cloud-Api-Key", license.Key)
	request.Header.Set("X-Consumer-Username", "lid:00000000-0000-0000-0000-000000000000")
	request.Header.Set("X-Consumer-Groups", "ns:default")

	// execute request
	response, err := provider.httpClient.Do(request)
	if err != nil {
		return nil, err
	}

	// read response
	defer response.Body.Close()
	responseBody, err := io.ReadAll(response.Body)
	if err != nil {
		return nil, err
	}

	// only 2XX
	if response.StatusCode/100 == 2 {
		return responseBody, nil
	}

	errorMessage := gjson.GetBytes(responseBody, "error").String()
	if errorMessage == "" {
		errorMessage = "an unknown error occurred"
	}

	// return error for non 2XX
	return nil, provider.errFromStatusCode(response.StatusCode, errorMessage)
}

func (provider *Provider) errFromStatusCode(code int, errorMessage string) error {
	switch code {
	case http.StatusBadRequest:
		return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, errorMessage)
	case http.StatusUnauthorized:
		return errors.New(errors.TypeUnauthenticated, errors.CodeUnauthenticated, errorMessage)
	case http.StatusForbidden:
		return errors.New(errors.TypeForbidden, errors.CodeForbidden, errorMessage)
	case http.StatusNotFound:
		return errors.New(errors.TypeNotFound, errors.CodeNotFound, errorMessage)
	case http.StatusConflict:
		return errors.New(errors.TypeAlreadyExists, errors.CodeAlreadyExists, errorMessage)
	}

	return errors.New(errors.TypeInternal, errors.CodeInternal, errorMessage)
}
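Reviewer note: errFromStatusCode maps gateway HTTP status codes onto the typed errors package, so callers can branch on error type rather than raw codes. A minimal caller sketch (key name and expiry are hypothetical; errors.Ast is the type-check helper used elsewhere in this changeset):

	err := provider.UpdateIngestionKey(ctx, orgID, keyID, "edge-eu", []string{"prod"}, time.Now().Add(30*24*time.Hour))
	if err != nil && errors.Ast(err, errors.TypeNotFound) {
		// a gateway 404 surfaces here as errors.TypeNotFound
	}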
@@ -1,132 +0,0 @@
package middleware

import (
	"log/slog"
	"net/http"

	"github.com/SigNoz/signoz/pkg/authz"
	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/gorilla/mux"
)

const (
	authzDeniedMessage string = "::AUTHZ-DENIED::"
)

type AuthZ struct {
	logger *slog.Logger
	authzService authz.AuthZ
}

func NewAuthZ(logger *slog.Logger) *AuthZ {
	if logger == nil {
		panic("cannot build authz middleware, logger is empty")
	}

	return &AuthZ{logger: logger}
}

func (middleware *AuthZ) ViewAccess(next http.HandlerFunc) http.HandlerFunc {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		claims, err := authtypes.ClaimsFromContext(req.Context())
		if err != nil {
			render.Error(rw, err)
			return
		}

		if err := claims.IsViewer(); err != nil {
			middleware.logger.WarnContext(req.Context(), authzDeniedMessage, "claims", claims)
			render.Error(rw, err)
			return
		}

		next(rw, req)
	})
}

func (middleware *AuthZ) EditAccess(next http.HandlerFunc) http.HandlerFunc {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		claims, err := authtypes.ClaimsFromContext(req.Context())
		if err != nil {
			render.Error(rw, err)
			return
		}

		if err := claims.IsEditor(); err != nil {
			middleware.logger.WarnContext(req.Context(), authzDeniedMessage, "claims", claims)
			render.Error(rw, err)
			return
		}

		next(rw, req)
	})
}

func (middleware *AuthZ) AdminAccess(next http.HandlerFunc) http.HandlerFunc {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		claims, err := authtypes.ClaimsFromContext(req.Context())
		if err != nil {
			render.Error(rw, err)
			return
		}

		if err := claims.IsAdmin(); err != nil {
			middleware.logger.WarnContext(req.Context(), authzDeniedMessage, "claims", claims)
			render.Error(rw, err)
			return
		}

		next(rw, req)
	})
}

func (middleware *AuthZ) SelfAccess(next http.HandlerFunc) http.HandlerFunc {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		claims, err := authtypes.ClaimsFromContext(req.Context())
		if err != nil {
			render.Error(rw, err)
			return
		}

		id := mux.Vars(req)["id"]
		if err := claims.IsSelfAccess(id); err != nil {
			middleware.logger.WarnContext(req.Context(), authzDeniedMessage, "claims", claims)
			render.Error(rw, err)
			return
		}

		next(rw, req)
	})
}

func (middleware *AuthZ) OpenAccess(next http.HandlerFunc) http.HandlerFunc {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		next(rw, req)
	})
}

// Check middleware accepts the relation, typeable, parentTypeable (for direct access + group relations) and a callback function to derive the selector and parentSelectors on a per-request basis.
func (middleware *AuthZ) Check(next http.HandlerFunc, relation authtypes.Relation, translation authtypes.Relation, typeable authtypes.Typeable, cb authtypes.SelectorCallbackFn) http.HandlerFunc {
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		claims, err := authtypes.ClaimsFromContext(req.Context())
		if err != nil {
			render.Error(rw, err)
			return
		}

		selector, err := cb(req.Context(), claims)
		if err != nil {
			render.Error(rw, err)
			return
		}

		err = middleware.authzService.CheckWithTupleCreation(req.Context(), claims, relation, typeable, selector)
		if err != nil {
			render.Error(rw, err)
			return
		}

		next(rw, req)
	})
}
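Reviewer note: a wiring sketch for the middleware above (route and handler names are hypothetical); it mirrors how routes are registered later in this changeset:

	router.HandleFunc("/api/v1/dashboards", am.ViewAccess(listDashboards)).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/dashboards", am.EditAccess(createDashboard)).Methods(http.MethodPost)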
@@ -1,10 +1,10 @@
package licensing

import (
-	"fmt"
	"sync"
	"time"

+	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/licensing"
)

@@ -18,7 +18,7 @@ func Config(pollInterval time.Duration, failureThreshold int) licensing.Config {
	once.Do(func() {
		config = licensing.Config{PollInterval: pollInterval, FailureThreshold: failureThreshold}
		if err := config.Validate(); err != nil {
-			panic(fmt.Errorf("invalid licensing config: %w", err))
+			panic(errors.WrapInternalf(err, errors.CodeInternal, "invalid licensing config"))
		}
	})
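Reviewer note: Config guards initialization with sync.Once, so the first caller wins and later arguments are ignored; a usage sketch (values hypothetical):

	cfg := Config(15*time.Second, 3) // first call builds and validates the config
	_ = Config(time.Minute, 5)       // subsequent calls return the original config unchanged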
ee/modules/dashboard/impldashboard/module.go (new file, 297 lines)
@@ -0,0 +1,297 @@
package impldashboard

import (
	"context"
	"maps"

	"github.com/SigNoz/signoz/pkg/analytics"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/licensing"
	"github.com/SigNoz/signoz/pkg/modules/dashboard"
	pkgimpldashboard "github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	"github.com/SigNoz/signoz/pkg/modules/role"
	"github.com/SigNoz/signoz/pkg/querier"
	"github.com/SigNoz/signoz/pkg/queryparser"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
	"github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/roletypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type module struct {
	pkgDashboardModule dashboard.Module
	store dashboardtypes.Store
	settings factory.ScopedProviderSettings
	role role.Module
	querier querier.Querier
	licensing licensing.Licensing
}

func NewModule(store dashboardtypes.Store, settings factory.ProviderSettings, analytics analytics.Analytics, orgGetter organization.Getter, role role.Module, queryParser queryparser.QueryParser, querier querier.Querier, licensing licensing.Licensing) dashboard.Module {
	scopedProviderSettings := factory.NewScopedProviderSettings(settings, "github.com/SigNoz/signoz/ee/modules/dashboard/impldashboard")
	pkgDashboardModule := pkgimpldashboard.NewModule(store, settings, analytics, orgGetter, queryParser)

	return &module{
		pkgDashboardModule: pkgDashboardModule,
		store: store,
		settings: scopedProviderSettings,
		role: role,
		querier: querier,
		licensing: licensing,
	}
}

func (module *module) CreatePublic(ctx context.Context, orgID valuer.UUID, publicDashboard *dashboardtypes.PublicDashboard) error {
	_, err := module.licensing.GetActive(ctx, orgID)
	if err != nil {
		return errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
	}

	storablePublicDashboard, err := module.store.GetPublic(ctx, publicDashboard.DashboardID.StringValue())
	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
		return err
	}
	if storablePublicDashboard != nil {
		return errors.Newf(errors.TypeAlreadyExists, dashboardtypes.ErrCodePublicDashboardAlreadyExists, "dashboard with id %s is already public", storablePublicDashboard.DashboardID)
	}

	role, err := module.role.GetOrCreate(ctx, roletypes.NewRole(roletypes.AnonymousUserRoleName, roletypes.AnonymousUserRoleDescription, roletypes.RoleTypeManaged.StringValue(), orgID))
	if err != nil {
		return err
	}

	err = module.role.Assign(ctx, role.ID, orgID, authtypes.MustNewSubject(authtypes.TypeableAnonymous, authtypes.AnonymousUser.StringValue(), orgID, nil))
	if err != nil {
		return err
	}

	additionObject := authtypes.MustNewObject(
		authtypes.Resource{
			Name: dashboardtypes.TypeableMetaResourcePublicDashboard.Name(),
			Type: authtypes.TypeMetaResource,
		},
		authtypes.MustNewSelector(authtypes.TypeMetaResource, publicDashboard.ID.String()),
	)

	err = module.role.PatchObjects(ctx, orgID, role.ID, authtypes.RelationRead, []*authtypes.Object{additionObject}, nil)
	if err != nil {
		return err
	}

	err = module.store.CreatePublic(ctx, dashboardtypes.NewStorablePublicDashboardFromPublicDashboard(publicDashboard))
	if err != nil {
		return err
	}

	return nil
}

func (module *module) GetPublic(ctx context.Context, orgID valuer.UUID, dashboardID valuer.UUID) (*dashboardtypes.PublicDashboard, error) {
	_, err := module.licensing.GetActive(ctx, orgID)
	if err != nil {
		return nil, errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
	}

	storablePublicDashboard, err := module.store.GetPublic(ctx, dashboardID.StringValue())
	if err != nil {
		return nil, err
	}

	return dashboardtypes.NewPublicDashboardFromStorablePublicDashboard(storablePublicDashboard), nil
}

func (module *module) GetDashboardByPublicID(ctx context.Context, id valuer.UUID) (*dashboardtypes.Dashboard, error) {
	storableDashboard, err := module.store.GetDashboardByPublicID(ctx, id.StringValue())
	if err != nil {
		return nil, err
	}

	return dashboardtypes.NewDashboardFromStorableDashboard(storableDashboard), nil
}

func (module *module) GetPublicDashboardSelectorsAndOrg(ctx context.Context, id valuer.UUID, orgs []*types.Organization) ([]authtypes.Selector, valuer.UUID, error) {
	orgIDs := make([]string, len(orgs))
	for idx, org := range orgs {
		orgIDs[idx] = org.ID.StringValue()
	}

	storableDashboard, err := module.store.GetDashboardByOrgsAndPublicID(ctx, orgIDs, id.StringValue())
	if err != nil {
		return nil, valuer.UUID{}, err
	}

	return []authtypes.Selector{
		authtypes.MustNewSelector(authtypes.TypeMetaResource, id.StringValue()),
	}, storableDashboard.OrgID, nil
}

func (module *module) GetPublicWidgetQueryRange(ctx context.Context, id valuer.UUID, widgetIdx, startTime, endTime uint64) (*querybuildertypesv5.QueryRangeResponse, error) {
	dashboard, err := module.GetDashboardByPublicID(ctx, id)
	if err != nil {
		return nil, err
	}

	query, err := dashboard.GetWidgetQuery(startTime, endTime, widgetIdx, module.settings.Logger())
	if err != nil {
		return nil, err
	}

	return module.querier.QueryRange(ctx, dashboard.OrgID, query)
}

func (module *module) UpdatePublic(ctx context.Context, orgID valuer.UUID, publicDashboard *dashboardtypes.PublicDashboard) error {
	_, err := module.licensing.GetActive(ctx, orgID)
	if err != nil {
		return errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
	}

	return module.store.UpdatePublic(ctx, dashboardtypes.NewStorablePublicDashboardFromPublicDashboard(publicDashboard))
}

func (module *module) Delete(ctx context.Context, orgID valuer.UUID, id valuer.UUID) error {
	dashboard, err := module.Get(ctx, orgID, id)
	if err != nil {
		return err
	}

	if dashboard.Locked {
		return errors.New(errors.TypeInvalidInput, errors.CodeInvalidInput, "dashboard is locked, please unlock the dashboard to delete it")
	}

	err = module.store.RunInTx(ctx, func(ctx context.Context) error {
		err := module.deletePublic(ctx, orgID, id)
		if err != nil && !errors.Ast(err, errors.TypeNotFound) {
			return err
		}

		err = module.store.Delete(ctx, orgID, id)
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return err
	}

	return nil
}

func (module *module) DeletePublic(ctx context.Context, orgID valuer.UUID, dashboardID valuer.UUID) error {
	_, err := module.licensing.GetActive(ctx, orgID)
	if err != nil {
		return errors.New(errors.TypeLicenseUnavailable, errors.CodeLicenseUnavailable, "a valid license is not available").WithAdditional("this feature requires a valid license").WithAdditional(err.Error())
	}

	publicDashboard, err := module.GetPublic(ctx, orgID, dashboardID)
	if err != nil {
		return err
	}

	role, err := module.role.GetOrCreate(ctx, roletypes.NewRole(roletypes.AnonymousUserRoleName, roletypes.AnonymousUserRoleDescription, roletypes.RoleTypeManaged.StringValue(), orgID))
	if err != nil {
		return err
	}

	deletionObject := authtypes.MustNewObject(
		authtypes.Resource{
			Name: dashboardtypes.TypeableMetaResourcePublicDashboard.Name(),
			Type: authtypes.TypeMetaResource,
		},
		authtypes.MustNewSelector(authtypes.TypeMetaResource, publicDashboard.ID.String()),
	)

	err = module.role.PatchObjects(ctx, orgID, role.ID, authtypes.RelationRead, nil, []*authtypes.Object{deletionObject})
	if err != nil {
		return err
	}

	err = module.store.DeletePublic(ctx, dashboardID.StringValue())
	if err != nil {
		return err
	}

	return nil
}

func (module *module) Collect(ctx context.Context, orgID valuer.UUID) (map[string]any, error) {
	dashboards, err := module.store.List(ctx, orgID)
	if err != nil {
		return nil, err
	}

	publicDashboards, err := module.store.ListPublic(ctx, orgID)
	if err != nil {
		return nil, err
	}

	stats := make(map[string]any)
	maps.Copy(stats, dashboardtypes.NewStatsFromStorableDashboards(dashboards))
	maps.Copy(stats, dashboardtypes.NewStatsFromStorablePublicDashboards(publicDashboards))
	return stats, nil
}

func (module *module) Create(ctx context.Context, orgID valuer.UUID, createdBy string, creator valuer.UUID, data dashboardtypes.PostableDashboard) (*dashboardtypes.Dashboard, error) {
	return module.pkgDashboardModule.Create(ctx, orgID, createdBy, creator, data)
}

func (module *module) Get(ctx context.Context, orgID valuer.UUID, id valuer.UUID) (*dashboardtypes.Dashboard, error) {
	return module.pkgDashboardModule.Get(ctx, orgID, id)
}

func (module *module) GetByMetricNames(ctx context.Context, orgID valuer.UUID, metricNames []string) (map[string][]map[string]string, error) {
	return module.pkgDashboardModule.GetByMetricNames(ctx, orgID, metricNames)
}

func (module *module) MustGetTypeables() []authtypes.Typeable {
	return module.pkgDashboardModule.MustGetTypeables()
}

func (module *module) List(ctx context.Context, orgID valuer.UUID) ([]*dashboardtypes.Dashboard, error) {
	return module.pkgDashboardModule.List(ctx, orgID)
}

func (module *module) Update(ctx context.Context, orgID valuer.UUID, id valuer.UUID, updatedBy string, data dashboardtypes.UpdatableDashboard, diff int) (*dashboardtypes.Dashboard, error) {
	return module.pkgDashboardModule.Update(ctx, orgID, id, updatedBy, data, diff)
}

func (module *module) LockUnlock(ctx context.Context, orgID valuer.UUID, id valuer.UUID, updatedBy string, role types.Role, lock bool) error {
	return module.pkgDashboardModule.LockUnlock(ctx, orgID, id, updatedBy, role, lock)
}

func (module *module) deletePublic(ctx context.Context, orgID valuer.UUID, dashboardID valuer.UUID) error {
	publicDashboard, err := module.store.GetPublic(ctx, dashboardID.String())
	if err != nil {
		return err
	}

	role, err := module.role.GetOrCreate(ctx, roletypes.NewRole(roletypes.AnonymousUserRoleName, roletypes.AnonymousUserRoleDescription, roletypes.RoleTypeManaged.StringValue(), orgID))
	if err != nil {
		return err
	}

	deletionObject := authtypes.MustNewObject(
		authtypes.Resource{
			Name: dashboardtypes.TypeableMetaResourcePublicDashboard.Name(),
			Type: authtypes.TypeMetaResource,
		},
		authtypes.MustNewSelector(authtypes.TypeMetaResource, publicDashboard.ID.String()),
	)

	err = module.role.PatchObjects(ctx, orgID, role.ID, authtypes.RelationRead, nil, []*authtypes.Object{deletionObject})
	if err != nil {
		return err
	}

	err = module.store.DeletePublic(ctx, dashboardID.StringValue())
	if err != nil {
		return err
	}

	return nil
}
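Reviewer note: CreatePublic and deletePublic are mirror images around role.PatchObjects: publishing grants the managed anonymous role RelationRead on the public-dashboard object, unpublishing revokes it. Condensed from the code above:

	// grant on publish
	err = module.role.PatchObjects(ctx, orgID, role.ID, authtypes.RelationRead, []*authtypes.Object{obj}, nil)
	// revoke on unpublish
	err = module.role.PatchObjects(ctx, orgID, role.ID, authtypes.RelationRead, nil, []*authtypes.Object{obj})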
@@ -10,6 +10,7 @@ import (
	"github.com/SigNoz/signoz/ee/query-service/usage"
	"github.com/SigNoz/signoz/pkg/alertmanager"
	"github.com/SigNoz/signoz/pkg/apis/fields"
+	"github.com/SigNoz/signoz/pkg/global"
	"github.com/SigNoz/signoz/pkg/http/middleware"
	querierAPI "github.com/SigNoz/signoz/pkg/querier"
	baseapp "github.com/SigNoz/signoz/pkg/query-service/app"
@@ -19,8 +20,8 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/interfaces"
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	rules "github.com/SigNoz/signoz/pkg/query-service/rules"
+	"github.com/SigNoz/signoz/pkg/queryparser"
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/version"
	"github.com/gorilla/mux"
)
@@ -35,10 +36,8 @@ type APIHandlerOptions struct {
	Gateway *httputil.ReverseProxy
	GatewayUrl string
	// Querier Influx Interval
-	FluxInterval time.Duration
-	UseLogsNewSchema bool
-	UseTraceNewSchema bool
-	JWT *authtypes.JWT
+	FluxInterval time.Duration
+	GlobalConfig global.Config
}

type APIHandler struct {
@@ -60,6 +59,7 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
		FieldsAPI: fields.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.TelemetryStore),
		Signoz: signoz,
		QuerierAPI: querierAPI.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.Querier, signoz.Analytics),
+		QueryParserAPI: queryparser.NewAPI(signoz.Instrumentation.ToProviderSettings(), signoz.QueryParser),
	})

	if err != nil {
@@ -92,9 +92,6 @@ func (ah *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
	// routes available only in ee version
	router.HandleFunc("/api/v1/features", am.ViewAccess(ah.getFeatureFlags)).Methods(http.MethodGet)

-	// paid plans specific routes
-	router.HandleFunc("/api/v1/complete/saml", am.OpenAccess(ah.receiveSAML)).Methods(http.MethodPost)

	// base overrides
	router.HandleFunc("/api/v1/version", am.OpenAccess(ah.getVersion)).Methods(http.MethodGet)

@@ -1,107 +0,0 @@
package api

import (
	"context"
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"

	"go.uber.org/zap"

	"github.com/SigNoz/signoz/pkg/query-service/constants"
	"github.com/SigNoz/signoz/pkg/valuer"
)

func handleSsoError(w http.ResponseWriter, r *http.Request, redirectURL string) {
	ssoError := []byte("Login failed. Please contact your system administrator")
	dst := make([]byte, base64.StdEncoding.EncodedLen(len(ssoError)))
	base64.StdEncoding.Encode(dst, ssoError)

	http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectURL, string(dst)), http.StatusSeeOther)
}

// receiveSAML completes a SAML request and gets user logged in
func (ah *APIHandler) receiveSAML(w http.ResponseWriter, r *http.Request) {
	// this is the source url that initiated the login request
	redirectUri := constants.GetDefaultSiteURL()
	ctx := context.Background()

	err := r.ParseForm()
	if err != nil {
		zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
		handleSsoError(w, r, redirectUri)
		return
	}

	// the relay state is sent when a login request is submitted to
	// Idp.
	relayState := r.FormValue("RelayState")
	zap.L().Debug("[receiveSAML] relay state", zap.String("relayState", relayState))

	parsedState, err := url.Parse(relayState)
	if err != nil || relayState == "" {
		zap.L().Error("[receiveSAML] failed to process response - invalid response from IDP", zap.Error(err), zap.Any("request", r))
		handleSsoError(w, r, redirectUri)
		return
	}

	// upgrade redirect url from the relay state for better accuracy
	redirectUri = fmt.Sprintf("%s://%s%s", parsedState.Scheme, parsedState.Host, "/login")

	// fetch domain by parsing relay state.
	domain, err := ah.Signoz.Modules.User.GetDomainFromSsoResponse(ctx, parsedState)
	if err != nil {
		handleSsoError(w, r, redirectUri)
		return
	}

	orgID, err := valuer.NewUUID(domain.OrgID)
	if err != nil {
		handleSsoError(w, r, redirectUri)
		return
	}

	_, err = ah.Signoz.Licensing.GetActive(ctx, orgID)
	if err != nil {
		zap.L().Error("[receiveSAML] sso requested but feature unavailable in org domain")
		http.Redirect(w, r, fmt.Sprintf("%s?ssoerror=%s", redirectUri, "feature unavailable, please upgrade your billing plan to access this feature"), http.StatusMovedPermanently)
		return
	}

	sp, err := domain.PrepareSamlRequest(parsedState)
	if err != nil {
		zap.L().Error("[receiveSAML] failed to prepare saml request for domain", zap.String("domain", domain.String()), zap.Error(err))
		handleSsoError(w, r, redirectUri)
		return
	}

	assertionInfo, err := sp.RetrieveAssertionInfo(r.FormValue("SAMLResponse"))
	if err != nil {
		zap.L().Error("[receiveSAML] failed to retrieve assertion info from saml response", zap.String("domain", domain.String()), zap.Error(err))
		handleSsoError(w, r, redirectUri)
		return
	}

	if assertionInfo.WarningInfo.InvalidTime {
		zap.L().Error("[receiveSAML] expired saml response", zap.String("domain", domain.String()), zap.Error(err))
		handleSsoError(w, r, redirectUri)
		return
	}

	email := assertionInfo.NameID
	if email == "" {
		zap.L().Error("[receiveSAML] invalid email in the SSO response", zap.String("domain", domain.String()))
		handleSsoError(w, r, redirectUri)
		return
	}

	nextPage, err := ah.Signoz.Modules.User.PrepareSsoRedirect(ctx, redirectUri, email)
	if err != nil {
		zap.L().Error("[receiveSAML] failed to generate redirect URI after successful login ", zap.String("domain", domain.String()), zap.Error(err))
		handleSsoError(w, r, redirectUri)
		return
	}

	http.Redirect(w, r, nextPage, http.StatusSeeOther)
}
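Reviewer note: every failure path in receiveSAML funnels through handleSsoError, which base64-encodes a generic message into the ssoerror query parameter. A worked example with a hypothetical relay state:

	// RelayState = "https://app.example.com/dashboard" gives
	// redirectUri = "https://app.example.com/login", and on failure the browser
	// is redirected to https://app.example.com/login?ssoerror=<base64-encoded message>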
@@ -10,7 +10,6 @@ import (
	"strings"
	"time"

	"github.com/SigNoz/signoz/ee/query-service/constants"
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/modules/user"
@@ -77,7 +76,7 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
		return
	}

-	ingestionUrl, signozApiUrl, apiErr := getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
+	signozApiUrl, apiErr := ah.getIngestionUrlAndSigNozAPIUrl(r.Context(), license.Key)
	if apiErr != nil {
		RespondError(w, basemodel.WrapApiError(
			apiErr, "couldn't deduce ingestion url and signoz api url",
@@ -85,7 +84,7 @@ func (ah *APIHandler) CloudIntegrationsGenerateConnectionParams(w http.ResponseW
		return
	}

-	result.IngestionUrl = ingestionUrl
+	result.IngestionUrl = ah.opts.GlobalConfig.IngestionURL.String()
	result.SigNozAPIUrl = signozApiUrl

	gatewayUrl := ah.opts.GatewayUrl
@@ -168,95 +167,66 @@ func (ah *APIHandler) getOrCreateCloudIntegrationPAT(ctx context.Context, orgId
func (ah *APIHandler) getOrCreateCloudIntegrationUser(
	ctx context.Context, orgId string, cloudProvider string,
) (*types.User, *basemodel.ApiError) {
-	cloudIntegrationUser := fmt.Sprintf("%s-integration", cloudProvider)
-	email := fmt.Sprintf("%s@signoz.io", cloudIntegrationUser)
+	cloudIntegrationUserName := fmt.Sprintf("%s-integration", cloudProvider)
+	email := valuer.MustNewEmail(fmt.Sprintf("%s@signoz.io", cloudIntegrationUserName))

	integrationUserResult, err := ah.Signoz.Modules.User.GetUserByEmailInOrg(ctx, orgId, email)
	if err != nil && !errors.Ast(err, errors.TypeNotFound) {
		return nil, basemodel.NotFoundError(fmt.Errorf("couldn't look for integration user: %w", err))
	}

	if integrationUserResult != nil {
		return &integrationUserResult.User, nil
	}

	zap.L().Info(
		"cloud integration user not found. Attempting to create the user",
		zap.String("cloudProvider", cloudProvider),
	)

-	newUser, err := types.NewUser(cloudIntegrationUser, email, types.RoleViewer.String(), orgId)
-	if err != nil {
-		return nil, basemodel.InternalError(fmt.Errorf(
-			"couldn't create cloud integration user: %w", err,
-		))
-	}
-
-	password := types.MustGenerateFactorPassword(newUser.ID.StringValue())
-
-	err = ah.Signoz.Modules.User.CreateUser(ctx, newUser, user.WithFactorPassword(password))
+	cloudIntegrationUser, err := types.NewUser(cloudIntegrationUserName, email, types.RoleViewer, valuer.MustNewUUID(orgId))
	if err != nil {
		return nil, basemodel.InternalError(fmt.Errorf("couldn't create cloud integration user: %w", err))
	}

-	return newUser, nil
+	password := types.MustGenerateFactorPassword(cloudIntegrationUser.ID.StringValue())
+
+	cloudIntegrationUser, err = ah.Signoz.Modules.User.GetOrCreateUser(ctx, cloudIntegrationUser, user.WithFactorPassword(password))
+	if err != nil {
+		return nil, basemodel.InternalError(fmt.Errorf("couldn't look for integration user: %w", err))
+	}
+
+	return cloudIntegrationUser, nil
}

-func getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (
-	string, string, *basemodel.ApiError,
+func (ah *APIHandler) getIngestionUrlAndSigNozAPIUrl(ctx context.Context, licenseKey string) (
+	string, *basemodel.ApiError,
) {
-	url := fmt.Sprintf(
-		"%s%s",
-		strings.TrimSuffix(constants.ZeusURL, "/"),
-		"/v2/deployments/me",
-	)
-
	// TODO: remove this struct from here
	type deploymentResponse struct {
-		Status string `json:"status"`
-		Error string `json:"error"`
-		Data struct {
-			Name string `json:"name"`
-
-			ClusterInfo struct {
-				Region struct {
-					DNS string `json:"dns"`
-				} `json:"region"`
-			} `json:"cluster"`
-		} `json:"data"`
+		Name string `json:"name"`
+		ClusterInfo struct {
+			Region struct {
+				DNS string `json:"dns"`
+			} `json:"region"`
+		} `json:"cluster"`
	}

-	resp, apiErr := requestAndParseResponse[deploymentResponse](
-		ctx, url, map[string]string{"X-Signoz-Cloud-Api-Key": licenseKey}, nil,
-	)
-
-	if apiErr != nil {
-		return "", "", basemodel.WrapApiError(
-			apiErr, "couldn't query for deployment info",
-		)
-	}
-
-	if resp.Status != "success" {
-		return "", "", basemodel.InternalError(fmt.Errorf(
-			"couldn't query for deployment info: status: %s, error: %s",
-			resp.Status, resp.Error,
+	respBytes, err := ah.Signoz.Zeus.GetDeployment(ctx, licenseKey)
+	if err != nil {
+		return "", basemodel.InternalError(fmt.Errorf(
+			"couldn't query for deployment info: error: %w", err,
		))
	}

-	regionDns := resp.Data.ClusterInfo.Region.DNS
-	deploymentName := resp.Data.Name
+	resp := new(deploymentResponse)
+
+	err = json.Unmarshal(respBytes, resp)
+	if err != nil {
+		return "", basemodel.InternalError(fmt.Errorf(
+			"couldn't unmarshal deployment info response: error: %w", err,
+		))
+	}
+
+	regionDns := resp.ClusterInfo.Region.DNS
+	deploymentName := resp.Name

	if len(regionDns) < 1 || len(deploymentName) < 1 {
		// Fail early if actual response structure and expectation here ever diverge
-		return "", "", basemodel.InternalError(fmt.Errorf(
+		return "", basemodel.InternalError(fmt.Errorf(
			"deployment info response not in expected shape. couldn't determine region dns and deployment name",
		))
	}

-	ingestionUrl := fmt.Sprintf("https://ingest.%s", regionDns)
-
	signozApiUrl := fmt.Sprintf("https://%s.%s", deploymentName, regionDns)

-	return ingestionUrl, signozApiUrl, nil
+	return signozApiUrl, nil
}

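Reviewer note: a worked example of the URL derivation above (deployment values hypothetical):

	// a deployment named "acme" in a region whose DNS is "us.signoz.cloud" yields
	//   signozApiUrl = "https://acme.us.signoz.cloud"
	// before this change it also yielded ingestionUrl = "https://ingest.us.signoz.cloud";
	// the ingestion URL now comes from ah.opts.GlobalConfig.IngestionURL instead.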
type ingestionKey struct {
@@ -9,9 +9,10 @@ import (
	"time"

	"github.com/SigNoz/signoz/ee/query-service/constants"
	pkgError "github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/flagger"
	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/featuretypes"
	"github.com/SigNoz/signoz/pkg/types/licensetypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"go.uber.org/zap"
@@ -25,11 +26,7 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
		return
	}

-	orgID, err := valuer.NewUUID(claims.OrgID)
-	if err != nil {
-		render.Error(w, pkgError.Newf(pkgError.TypeInvalidInput, pkgError.CodeInvalidInput, "orgId is invalid"))
-		return
-	}
+	orgID := valuer.MustNewUUID(claims.OrgID)

	featureSet, err := ah.Signoz.Licensing.GetFeatureFlags(r.Context(), orgID)
	if err != nil {
@@ -59,13 +56,16 @@ func (ah *APIHandler) getFeatureFlags(w http.ResponseWriter, r *http.Request) {
		}
	}

-	if constants.IsPreferSpanMetrics {
-		for idx, feature := range featureSet {
-			if feature.Name == licensetypes.UseSpanMetrics {
-				featureSet[idx].Active = true
-			}
-		}
-	}
+	evalCtx := featuretypes.NewFlaggerEvaluationContext(orgID)
+	useSpanMetrics := ah.Signoz.Flagger.BooleanOrEmpty(ctx, flagger.FeatureUseSpanMetrics, evalCtx)
+
+	featureSet = append(featureSet, &licensetypes.Feature{
+		Name: valuer.NewString(flagger.FeatureUseSpanMetrics.String()),
+		Active: useSpanMetrics,
+		Usage: 0,
+		UsageLimit: -1,
+		Route: "",
+	})

	if constants.IsDotMetricsEnabled {
		for idx, feature := range featureSet {
@@ -257,18 +257,20 @@ func (aH *APIHandler) queryRangeV5(rw http.ResponseWriter, req *http.Request) {
		results = append(results, item)
	}

+	// Build step intervals from the anomaly query
+	stepIntervals := make(map[string]uint64)
+	if anomalyQuery.StepInterval.Duration > 0 {
+		stepIntervals[anomalyQuery.Name] = uint64(anomalyQuery.StepInterval.Duration.Seconds())
+	}
+
	finalResp := &qbtypes.QueryRangeResponse{
		Type: queryRangeRequest.RequestType,
-		Data: struct {
-			Results []any `json:"results"`
-		}{
+		Data: qbtypes.QueryData{
			Results: results,
		},
-		Meta: struct {
-			RowsScanned uint64 `json:"rowsScanned"`
-			BytesScanned uint64 `json:"bytesScanned"`
-			DurationMS uint64 `json:"durationMs"`
-		}{},
+		Meta: qbtypes.ExecStats{
			StepIntervals: stepIntervals,
		},
	}

	render.Success(rw, http.StatusOK, finalResp)
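Reviewer note: the response now reuses the shared qbtypes.QueryData and qbtypes.ExecStats shapes instead of ad-hoc anonymous structs, which lets step intervals ride along in Meta. For example (values hypothetical):

	// an anomaly query named "A" with a 60s step interval produces
	// finalResp.Meta.StepIntervals == map[string]uint64{"A": 60}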
@@ -3,12 +3,18 @@ package app
import (
	"context"
	"fmt"
	"log/slog"
	"net"
	"net/http"
	_ "net/http/pprof" // http profiler
	"slices"

	"github.com/SigNoz/signoz/pkg/cache/memorycache"
	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/queryparser"
	"github.com/SigNoz/signoz/pkg/ruler/rulestore/sqlrulestore"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux"
	"go.opentelemetry.io/otel/propagation"

	"github.com/gorilla/handlers"

@@ -25,7 +31,6 @@ import (
	"github.com/SigNoz/signoz/pkg/signoz"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
-	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/web"
	"github.com/rs/cors"
	"github.com/soheilhy/cmux"
@@ -50,7 +55,6 @@ import (
type Server struct {
	config signoz.Config
	signoz *signoz.SigNoz
-	jwt *authtypes.JWT
	ruleManager *baserules.Manager

	// public http router
@@ -67,19 +71,32 @@ type Server struct {
}

// NewServer creates and initializes Server
-func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT) (*Server, error) {
+func NewServer(config signoz.Config, signoz *signoz.SigNoz) (*Server, error) {
	gatewayProxy, err := gateway.NewProxy(config.Gateway.URL.String(), gateway.RoutePrefix)
	if err != nil {
		return nil, err
	}

+	cacheForTraceDetail, err := memorycache.New(context.TODO(), signoz.Instrumentation.ToProviderSettings(), cache.Config{
+		Provider: "memory",
+		Memory: cache.Memory{
+			NumCounters: 10 * 10000,
+			MaxCost: 1 << 27, // 128 MB
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+
	reader := clickhouseReader.NewReader(
		signoz.SQLStore,
		signoz.TelemetryStore,
		signoz.Prometheus,
		signoz.TelemetryStore.Cluster(),
		config.Querier.FluxInterval,
+		cacheForTraceDetail,
		signoz.Cache,
		nil,
	)

	rm, err := makeRulesManager(
@@ -88,10 +105,12 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
		signoz.Alertmanager,
		signoz.SQLStore,
		signoz.TelemetryStore,
+		signoz.TelemetryMetadataStore,
		signoz.Prometheus,
		signoz.Modules.OrgGetter,
		signoz.Querier,
-		signoz.Instrumentation.Logger(),
+		signoz.Instrumentation.ToProviderSettings(),
+		signoz.QueryParser,
	)

	if err != nil {
@@ -153,7 +172,7 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
		FluxInterval: config.Querier.FluxInterval,
		Gateway: gatewayProxy,
		GatewayUrl: config.Gateway.URL.String(),
-		JWT: jwt,
+		GlobalConfig: config.Global,
	}

	apiHandler, err := api.NewAPIHandler(apiOpts, signoz)
@@ -164,7 +183,6 @@ func NewServer(config signoz.Config, signoz *signoz.SigNoz, jwt *authtypes.JWT)
	s := &Server{
		config: config,
		signoz: signoz,
-		jwt: jwt,
		ruleManager: rm,
		httpHostPort: baseconst.HTTPHostPort,
		unavailableChannel: make(chan healthcheck.Status),
@@ -193,9 +211,19 @@ func (s Server) HealthCheckStatus() chan healthcheck.Status {

func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*http.Server, error) {
	r := baseapp.NewRouter()
-	am := middleware.NewAuthZ(s.signoz.Instrumentation.Logger())
+	am := middleware.NewAuthZ(s.signoz.Instrumentation.Logger(), s.signoz.Modules.OrgGetter, s.signoz.Authz)

-	r.Use(middleware.NewAuth(s.jwt, []string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Instrumentation.Logger()).Wrap)
+	r.Use(otelmux.Middleware(
+		"apiserver",
+		otelmux.WithMeterProvider(s.signoz.Instrumentation.MeterProvider()),
+		otelmux.WithTracerProvider(s.signoz.Instrumentation.TracerProvider()),
+		otelmux.WithPropagators(propagation.NewCompositeTextMapPropagator(propagation.Baggage{}, propagation.TraceContext{})),
+		otelmux.WithFilter(func(r *http.Request) bool {
+			return !slices.Contains([]string{"/api/v1/health"}, r.URL.Path)
+		}),
+		otelmux.WithPublicEndpoint(),
+	))
+	r.Use(middleware.NewAuthN([]string{"Authorization", "Sec-WebSocket-Protocol"}, s.signoz.Sharder, s.signoz.Tokenizer, s.signoz.Instrumentation.Logger()).Wrap)
	r.Use(middleware.NewAPIKey(s.signoz.SQLStore, []string{"SIGNOZ-API-KEY"}, s.signoz.Instrumentation.Logger(), s.signoz.Sharder).Wrap)
	r.Use(middleware.NewTimeout(s.signoz.Instrumentation.Logger(),
		s.config.APIServer.Timeout.ExcludedRoutes,
@@ -220,6 +248,11 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
	apiHandler.MetricExplorerRoutes(r, am)
	apiHandler.RegisterTraceFunnelsRoutes(r, am)

+	err := s.signoz.APIServer.AddToRouter(r)
+	if err != nil {
+		return nil, err
+	}
+
	c := cors.New(cors.Options{
		AllowedOrigins: []string{"*"},
		AllowedMethods: []string{"GET", "DELETE", "POST", "PUT", "PATCH", "OPTIONS"},
@@ -230,7 +263,7 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h

	handler = handlers.CompressHandler(handler)

-	err := web.AddToRouter(r)
+	err = web.AddToRouter(r)
	if err != nil {
		return nil, err
	}
@@ -325,18 +358,19 @@ func (s *Server) Stop(ctx context.Context) error {
	return nil
}

-func makeRulesManager(ch baseint.Reader, cache cache.Cache, alertmanager alertmanager.Alertmanager, sqlstore sqlstore.SQLStore, telemetryStore telemetrystore.TelemetryStore, prometheus prometheus.Prometheus, orgGetter organization.Getter, querier querier.Querier, logger *slog.Logger) (*baserules.Manager, error) {
-	ruleStore := sqlrulestore.NewRuleStore(sqlstore)
+func makeRulesManager(ch baseint.Reader, cache cache.Cache, alertmanager alertmanager.Alertmanager, sqlstore sqlstore.SQLStore, telemetryStore telemetrystore.TelemetryStore, metadataStore telemetrytypes.MetadataStore, prometheus prometheus.Prometheus, orgGetter organization.Getter, querier querier.Querier, providerSettings factory.ProviderSettings, queryParser queryparser.QueryParser) (*baserules.Manager, error) {
+	ruleStore := sqlrulestore.NewRuleStore(sqlstore, queryParser, providerSettings)
	maintenanceStore := sqlrulestore.NewMaintenanceStore(sqlstore)
	// create manager opts
	managerOpts := &baserules.ManagerOptions{
		TelemetryStore: telemetryStore,
+		MetadataStore: metadataStore,
		Prometheus: prometheus,
		Context: context.Background(),
		Logger: zap.L(),
		Reader: ch,
		Querier: querier,
-		SLogger: logger,
+		SLogger: providerSettings.Logger,
		Cache: cache,
		EvalDelay: baseconst.GetEvalDelay(),
		PrepareTaskFunc: rules.PrepareTaskFunc,
@@ -346,6 +380,7 @@ func makeRulesManager(ch baseint.Reader, cache cache.Cache, alertmanager alertma
		RuleStore: ruleStore,
		MaintenanceStore: maintenanceStore,
		SqlStore: sqlstore,
+		QueryParser: queryParser,
	}

	// create Manager

@@ -4,19 +4,12 @@ import (
	"os"
)

-const (
-	DefaultSiteURL = "https://localhost:8080"
-)
-
var LicenseSignozIo = "https://license.signoz.io/api/v1"
var LicenseAPIKey = GetOrDefaultEnv("SIGNOZ_LICENSE_API_KEY", "")
var SaasSegmentKey = GetOrDefaultEnv("SIGNOZ_SAAS_SEGMENT_KEY", "")
var FetchFeatures = GetOrDefaultEnv("FETCH_FEATURES", "false")
var ZeusFeaturesURL = GetOrDefaultEnv("ZEUS_FEATURES_URL", "ZeusFeaturesURL")

// this is set via build time variable
var ZeusURL = "https://api.signoz.cloud"

func GetOrDefaultEnv(key string, fallback string) string {
	v := os.Getenv(key)
	if len(v) == 0 {
@@ -27,24 +20,12 @@ func GetOrDefaultEnv(key string, fallback string) string {

// constant functions that override env vars

-// GetDefaultSiteURL returns default site url, primarily
-// used to send saml request and allowing backend to
-// handle http redirect
-func GetDefaultSiteURL() string {
-	return GetOrDefaultEnv("SIGNOZ_SITE_URL", DefaultSiteURL)
-}
-
const DotMetricsEnabled = "DOT_METRICS_ENABLED"

var IsDotMetricsEnabled = false
-var IsPreferSpanMetrics = false

func init() {
	if GetOrDefaultEnv(DotMetricsEnabled, "true") == "true" {
		IsDotMetricsEnabled = true
	}
-
-	if GetOrDefaultEnv("USE_SPAN_METRICS", "false") == "true" {
-		IsPreferSpanMetrics = true
-	}
}
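Reviewer note: with GetDefaultSiteURL and the span-metrics toggle gone, GetOrDefaultEnv is the remaining env helper here; a usage sketch (variable name hypothetical):

	endpoint := GetOrDefaultEnv("MY_SERVICE_URL", "https://example.invalid") // falls back when unset or empty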
@@ -78,11 +78,6 @@ func NewAnomalyRule(

	opts = append(opts, baserules.WithLogger(logger))

-	if p.RuleCondition.CompareOp == ruletypes.ValueIsBelow {
-		target := -1 * *p.RuleCondition.Target
-		p.RuleCondition.Target = &target
-	}
-
	baseRule, err := baserules.NewBaseRule(id, orgID, p, reader, opts...)
	if err != nil {
		return nil, err
@@ -245,13 +240,14 @@ func (r *AnomalyRule) buildAndRunQuery(ctx context.Context, orgID valuer.UUID, t
	r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))

	for _, series := range queryResult.AnomalyScores {
-		if r.Condition() != nil && r.Condition().RequireMinPoints {
-			if len(series.Points) < r.Condition().RequiredNumPoints {
-				r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
-				continue
-			}
+		if !r.Condition().ShouldEval(series) {
+			r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
+			continue
		}
-		results, err := r.Threshold.ShouldAlert(*series, r.Unit())
+		results, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
+			ActiveAlerts: r.ActiveAlertsLabelFP(),
+			SendUnmatched: r.ShouldSendUnmatched(),
+		})
		if err != nil {
			return nil, err
		}
@@ -294,14 +290,27 @@ func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID,
	scoresJSON, _ := json.Marshal(queryResult.AnomalyScores)
	r.logger.InfoContext(ctx, "anomaly scores", "scores", string(scoresJSON))

-	for _, series := range queryResult.AnomalyScores {
-		if r.Condition().RequireMinPoints {
-			if len(series.Points) < r.Condition().RequiredNumPoints {
-				r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
-				continue
-			}
+	// Filter out new series if newGroupEvalDelay is configured
+	seriesToProcess := queryResult.AnomalyScores
+	if r.ShouldSkipNewGroups() {
+		filteredSeries, filterErr := r.BaseRule.FilterNewSeries(ctx, ts, seriesToProcess)
+		// In case of error we log the error and continue with the original series
+		if filterErr != nil {
+			r.logger.ErrorContext(ctx, "Error filtering new series, ", "error", filterErr, "rule_name", r.Name())
+		} else {
+			seriesToProcess = filteredSeries
+		}
+	}
+
+	for _, series := range seriesToProcess {
+		if !r.Condition().ShouldEval(series) {
+			r.logger.InfoContext(ctx, "not enough data points to evaluate series, skipping", "ruleid", r.ID(), "numPoints", len(series.Points), "requiredPoints", r.Condition().RequiredNumPoints)
+			continue
+		}
-		results, err := r.Threshold.ShouldAlert(*series, r.Unit())
+		results, err := r.Threshold.Eval(*series, r.Unit(), ruletypes.EvalData{
+			ActiveAlerts: r.ActiveAlertsLabelFP(),
+			SendUnmatched: r.ShouldSendUnmatched(),
+		})
		if err != nil {
			return nil, err
		}
@@ -310,7 +319,7 @@ func (r *AnomalyRule) buildAndRunQueryV5(ctx context.Context, orgID valuer.UUID,
	return resultVector, nil
}

-func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, error) {
+func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (int, error) {

	prevState := r.State()

@@ -327,7 +336,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
		res, err = r.buildAndRunQuery(ctx, r.OrgID(), ts)
	}
	if err != nil {
-		return nil, err
+		return 0, err
	}

	r.mtx.Lock()
@@ -402,7 +411,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
	if _, ok := alerts[h]; ok {
		r.logger.ErrorContext(ctx, "the alert query returns duplicate records", "rule_id", r.ID(), "alert", alerts[h])
		err = fmt.Errorf("duplicate alert found, vector contains metrics with the same labelset after applying alert labels")
-		return nil, err
+		return 0, err
	}

	alerts[h] = &ruletypes.Alert{
@@ -415,6 +424,7 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
		GeneratorURL: r.GeneratorURL(),
		Receivers: ruleReceiverMap[lbs.Map()[ruletypes.LabelThresholdName]],
		Missing: smpl.IsMissing,
+		IsRecovering: smpl.IsRecovering,
	}
}

@@ -427,6 +437,9 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro

	alert.Value = a.Value
	alert.Annotations = a.Annotations
+	// Update the recovering and missing state of existing alert
+	alert.IsRecovering = a.IsRecovering
+	alert.Missing = a.Missing
	if v, ok := alert.Labels.Map()[ruletypes.LabelThresholdName]; ok {
		alert.Receivers = ruleReceiverMap[v]
	}
@@ -485,6 +498,30 @@ func (r *AnomalyRule) Eval(ctx context.Context, ts time.Time) (interface{}, erro
			Value: a.Value,
		})
	}

+	// We need to change firing alert to recovering if the returned sample meets recovery threshold
+	changeFiringToRecovering := a.State == model.StateFiring && a.IsRecovering
+	// We need to change recovering alerts to firing if the returned sample meets target threshold
+	changeRecoveringToFiring := a.State == model.StateRecovering && !a.IsRecovering && !a.Missing
+	// in any of the above case we need to update the status of alert
+	if changeFiringToRecovering || changeRecoveringToFiring {
+		state := model.StateRecovering
+		if changeRecoveringToFiring {
+			state = model.StateFiring
+		}
+		a.State = state
+		r.logger.DebugContext(ctx, "converting alert state", "name", r.Name(), "state", state)
+		itemsToAdd = append(itemsToAdd, model.RuleStateHistory{
+			RuleID: r.ID(),
+			RuleName: r.Name(),
+			State: state,
+			StateChanged: true,
+			UnixMilli: ts.UnixMilli(),
+			Labels: model.LabelsString(labelsJSON),
+			Fingerprint: a.QueryResultLables.Hash(),
+			Value: a.Value,
+		})
+	}
}

	currentState := r.State()

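Reviewer note: the recovering-state plumbing added above reduces to two transitions; condensed from the code:

	// firing     + sample meeting the recovery threshold -> recovering
	// recovering + sample meeting the target threshold   -> firing
	// (a missing sample never flips a recovering alert back to firing)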
@@ -10,7 +10,7 @@ import (
	basemodel "github.com/SigNoz/signoz/pkg/query-service/model"
	baserules "github.com/SigNoz/signoz/pkg/query-service/rules"
	"github.com/SigNoz/signoz/pkg/query-service/utils/labels"
-	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
+	"github.com/SigNoz/signoz/pkg/types/ruletypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/google/uuid"
	"go.uber.org/zap"
@@ -37,6 +37,8 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
		opts.SLogger,
		baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
		baserules.WithSQLStore(opts.SQLStore),
+		baserules.WithQueryParser(opts.ManagerOpts.QueryParser),
+		baserules.WithMetadataStore(opts.ManagerOpts.MetadataStore),
	)

	if err != nil {
@@ -45,7 +47,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)

	rules = append(rules, tr)

-	// create ch rule task for evalution
+	// create ch rule task for evaluation
	task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(evaluation.GetFrequency()), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)

} else if opts.Rule.RuleType == ruletypes.RuleTypeProm {
@@ -59,6 +61,8 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
		opts.Reader,
		opts.ManagerOpts.Prometheus,
		baserules.WithSQLStore(opts.SQLStore),
+		baserules.WithQueryParser(opts.ManagerOpts.QueryParser),
+		baserules.WithMetadataStore(opts.ManagerOpts.MetadataStore),
	)

	if err != nil {
@@ -67,7 +71,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)

	rules = append(rules, pr)

-	// create promql rule task for evalution
+	// create promql rule task for evaluation
	task = newTask(baserules.TaskTypeProm, opts.TaskName, time.Duration(evaluation.GetFrequency()), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)

} else if opts.Rule.RuleType == ruletypes.RuleTypeAnomaly {
@@ -82,6 +86,8 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
		opts.Cache,
		baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
		baserules.WithSQLStore(opts.SQLStore),
+		baserules.WithQueryParser(opts.ManagerOpts.QueryParser),
+		baserules.WithMetadataStore(opts.ManagerOpts.MetadataStore),
	)
	if err != nil {
		return task, err
@@ -89,7 +95,7 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)

	rules = append(rules, ar)

-	// create anomaly rule task for evalution
+	// create anomaly rule task for evaluation
	task = newTask(baserules.TaskTypeCh, opts.TaskName, time.Duration(evaluation.GetFrequency()), rules, opts.ManagerOpts, opts.NotifyFunc, opts.MaintenanceStore, opts.OrgID)

} else {
@@ -140,6 +146,8 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
		baserules.WithSendAlways(),
		baserules.WithSendUnmatched(),
		baserules.WithSQLStore(opts.SQLStore),
+		baserules.WithQueryParser(opts.ManagerOpts.QueryParser),
+		baserules.WithMetadataStore(opts.ManagerOpts.MetadataStore),
	)

	if err != nil {
@@ -160,6 +168,8 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
		baserules.WithSendAlways(),
		baserules.WithSendUnmatched(),
		baserules.WithSQLStore(opts.SQLStore),
+		baserules.WithQueryParser(opts.ManagerOpts.QueryParser),
+		baserules.WithMetadataStore(opts.ManagerOpts.MetadataStore),
	)

	if err != nil {
@@ -179,6 +189,8 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
		baserules.WithSendAlways(),
		baserules.WithSendUnmatched(),
		baserules.WithSQLStore(opts.SQLStore),
+		baserules.WithQueryParser(opts.ManagerOpts.QueryParser),
+		baserules.WithMetadataStore(opts.ManagerOpts.MetadataStore),
	)
	if err != nil {
		zap.L().Error("failed to prepare a new anomaly rule for test", zap.String("name", alertname), zap.Error(err))
@@ -191,16 +203,12 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
	// set timestamp to current utc time
	ts := time.Now().UTC()

-	count, err := rule.Eval(ctx, ts)
+	alertsFound, err := rule.Eval(ctx, ts)
	if err != nil {
		zap.L().Error("evaluating rule failed", zap.String("rule", rule.Name()), zap.Error(err))
		return 0, basemodel.InternalError(fmt.Errorf("rule evaluation failed"))
	}
-	alertsFound, ok := count.(int)
-	if !ok {
-		return 0, basemodel.InternalError(fmt.Errorf("something went wrong"))
-	}
-	rule.SendAlerts(ctx, ts, 0, time.Duration(1*time.Minute), opts.NotifyFunc)
+	rule.SendAlerts(ctx, ts, 0, time.Minute, opts.NotifyFunc)

	return alertsFound, nil
}
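Reviewer note: with Eval returning (int, error) instead of (interface{}, error), callers read the alert count directly and the defensive type assertion disappears:

	alertsFound, err := rule.Eval(ctx, ts) // alertsFound is the number of alerts produced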
ee/query-service/rules/manager_test.go (new file, 283 lines)
@@ -0,0 +1,283 @@
package rules

import (
	"context"
	"encoding/json"
	"math"
	"strconv"
	"testing"
	"time"

	"github.com/SigNoz/signoz/pkg/alertmanager"
	alertmanagermock "github.com/SigNoz/signoz/pkg/alertmanager/alertmanagertest"
	"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
	"github.com/SigNoz/signoz/pkg/prometheus"
	"github.com/SigNoz/signoz/pkg/prometheus/prometheustest"
	"github.com/SigNoz/signoz/pkg/query-service/rules"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/sqlstore/sqlstoretest"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/telemetrystore/telemetrystoretest"
	"github.com/SigNoz/signoz/pkg/types/alertmanagertypes"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	cmock "github.com/srikanthccv/ClickHouse-go-mock"
)

func TestManager_TestNotification_SendUnmatched_ThresholdRule(t *testing.T) {
	target := 10.0
	recovery := 5.0

	for _, tc := range rules.TcTestNotiSendUnmatchedThresholdRule {
		t.Run(tc.Name, func(t *testing.T) {
			rule := rules.ThresholdRuleAtLeastOnceValueAbove(target, &recovery)

			// Marshal rule to JSON as TestNotification expects
			ruleBytes, err := json.Marshal(rule)
			require.NoError(t, err)

			orgID := valuer.GenerateUUID()

			// for saving temp alerts that are triggered via TestNotification
			triggeredTestAlerts := []map[*alertmanagertypes.PostableAlert][]string{}

			// Create manager using test factory with hooks
			mgr := rules.NewTestManager(t, &rules.TestManagerOptions{
				AlertmanagerHook: func(am alertmanager.Alertmanager) {
					fAlert := am.(*alertmanagermock.MockAlertmanager)
					// mock set notification config
					fAlert.On("SetNotificationConfig", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
					// for saving temp alerts that are triggered via TestNotification
					if tc.ExpectAlerts > 0 {
						fAlert.On("TestAlert", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
							triggeredTestAlerts = append(triggeredTestAlerts, args.Get(3).(map[*alertmanagertypes.PostableAlert][]string))
						}).Return(nil).Times(tc.ExpectAlerts)
					}
				},
				ManagerOptionsHook: func(opts *rules.ManagerOptions) {
					opts.PrepareTestRuleFunc = TestNotification
				},
				SqlStoreHook: func(store sqlstore.SQLStore) {
					mockStore := store.(*sqlstoretest.Provider)
					// Mock the organizations query that SendAlerts makes
					// Bun generates: SELECT id FROM organizations LIMIT 1 (or SELECT "id" FROM "organizations" LIMIT 1)
					orgRows := mockStore.Mock().NewRows([]string{"id"}).AddRow(orgID.StringValue())
					// Match bun's generated query pattern - bun may quote identifiers
					mockStore.Mock().ExpectQuery("SELECT (.+) FROM (.+)organizations(.+) LIMIT (.+)").WillReturnRows(orgRows)
				},
				TelemetryStoreHook: func(store telemetrystore.TelemetryStore) {
					telemetryStore := store.(*telemetrystoretest.Provider)
					// Set up mock data for telemetry store
					cols := make([]cmock.ColumnType, 0)
					cols = append(cols, cmock.ColumnType{Name: "value", Type: "Float64"})
					cols = append(cols, cmock.ColumnType{Name: "attr", Type: "String"})
					cols = append(cols, cmock.ColumnType{Name: "ts", Type: "DateTime"})

					alertDataRows := cmock.NewRows(cols, tc.Values)

					mock := telemetryStore.Mock()

					// Generate query arguments for the metric query
					evalTime := time.Now().UTC()
					evalWindow := 5 * time.Minute
					evalDelay := time.Duration(0)
					queryArgs := rules.GenerateMetricQueryCHArgs(
						evalTime,
						evalWindow,
						evalDelay,
						"probe_success",
						metrictypes.Unspecified,
					)

					mock.ExpectQuery("*WITH __temporal_aggregation_cte*").
						WithArgs(queryArgs...).
						WillReturnRows(alertDataRows)
				},
			})

			count, apiErr := mgr.TestNotification(context.Background(), orgID, string(ruleBytes))
			if apiErr != nil {
				t.Logf("TestNotification error: %v, type: %s", apiErr.Err, apiErr.Typ)
			}
			require.Nil(t, apiErr)
			assert.Equal(t, tc.ExpectAlerts, count)

			if tc.ExpectAlerts > 0 {
				// check if the alert has been triggered
				require.Len(t, triggeredTestAlerts, 1)
				var gotAlerts []*alertmanagertypes.PostableAlert
				for a := range triggeredTestAlerts[0] {
					gotAlerts = append(gotAlerts, a)
				}
				require.Len(t, gotAlerts, tc.ExpectAlerts)
				// check if the alert has triggered with correct threshold value
				if tc.ExpectValue != 0 {
					assert.Equal(t, strconv.FormatFloat(tc.ExpectValue, 'f', -1, 64), gotAlerts[0].Annotations["value"])
				}
			} else {
				// check if no alerts have been triggered
				assert.Empty(t, triggeredTestAlerts)
			}
		})
	}
}

func TestManager_TestNotification_SendUnmatched_PromRule(t *testing.T) {
	target := 10.0

	for _, tc := range rules.TcTestNotificationSendUnmatchedPromRule {
		t.Run(tc.Name, func(t *testing.T) {
			// Capture base time once per test case to ensure consistent timestamps
			baseTime := time.Now().UTC()

			rule := rules.BuildPromAtLeastOnceValueAbove(target, nil)

			// Marshal rule to JSON as TestNotification expects
			ruleBytes, err := json.Marshal(rule)
			require.NoError(t, err)

			orgID := valuer.GenerateUUID()

			// for saving temp alerts that are triggered via TestNotification
			triggeredTestAlerts := []map[*alertmanagertypes.PostableAlert][]string{}

			// Variable to store promProvider for cleanup
			var promProvider *prometheustest.Provider

			// Create manager using test factory with hooks
			mgr := rules.NewTestManager(t, &rules.TestManagerOptions{
				AlertmanagerHook: func(am alertmanager.Alertmanager) {
					mockAM := am.(*alertmanagermock.MockAlertmanager)
					// mock set notification config
					mockAM.On("SetNotificationConfig", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil)
					// for saving temp alerts that are triggered via TestNotification
					if tc.ExpectAlerts > 0 {
						mockAM.On("TestAlert", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) {
							triggeredTestAlerts = append(triggeredTestAlerts, args.Get(3).(map[*alertmanagertypes.PostableAlert][]string))
						}).Return(nil).Times(tc.ExpectAlerts)
					}
				},
				SqlStoreHook: func(store sqlstore.SQLStore) {
					mockStore := store.(*sqlstoretest.Provider)
					// Mock the organizations query that SendAlerts makes
					orgRows := mockStore.Mock().NewRows([]string{"id"}).AddRow(orgID.StringValue())
					mockStore.Mock().ExpectQuery("SELECT (.+) FROM (.+)organizations(.+) LIMIT (.+)").WillReturnRows(orgRows)
				},
				TelemetryStoreHook: func(store telemetrystore.TelemetryStore) {
					mockStore := store.(*telemetrystoretest.Provider)

					// Set up Prometheus-specific mock data
					// Fingerprint columns for Prometheus queries
					fingerprintCols := []cmock.ColumnType{
						{Name: "fingerprint", Type: "UInt64"},
						{Name: "any(labels)", Type: "String"},
					}

					// Samples columns for Prometheus queries
					samplesCols := []cmock.ColumnType{
						{Name: "metric_name", Type: "String"},
						{Name: "fingerprint", Type: "UInt64"},
						{Name: "unix_milli", Type: "Int64"},
						{Name: "value", Type: "Float64"},
						{Name: "flags", Type: "UInt32"},
					}

					// Calculate query time range similar to Prometheus rule tests
					// TestNotification uses time.Now().UTC() for evaluation
					// We calculate the query window based on current time to match what the actual evaluation will use
					evalTime := baseTime
					evalWindowMs := int64(5 * 60 * 1000) // 5 minutes in ms
					evalTimeMs := evalTime.UnixMilli()
					queryStart := ((evalTimeMs-2*evalWindowMs)/60000)*60000 + 1 // truncate to minute + 1ms
					queryEnd := (evalTimeMs / 60000) * 60000                    // truncate to minute

					// Create fingerprint data
					fingerprint := uint64(12345)
					labelsJSON := `{"__name__":"test_metric"}`
					fingerprintData := [][]interface{}{
						{fingerprint, labelsJSON},
					}
					fingerprintRows := cmock.NewRows(fingerprintCols, fingerprintData)

					// Create samples data from test case values, calculating timestamps relative to baseTime
					validSamplesData := make([][]interface{}, 0)
					for _, v := range tc.Values {
						// Skip NaN and Inf values in the samples data
						if math.IsNaN(v.Value) || math.IsInf(v.Value, 0) {
							continue
						}
						// Calculate timestamp relative to baseTime
						sampleTimestamp := baseTime.Add(v.Offset).UnixMilli()
						validSamplesData = append(validSamplesData, []interface{}{
							"test_metric",
							fingerprint,
							sampleTimestamp,
							v.Value,
							uint32(0), // flags - 0 means normal value
						})
					}
					samplesRows := cmock.NewRows(samplesCols, validSamplesData)

					mock := mockStore.Mock()

					// Mock the fingerprint query (for Prometheus label matching)
					mock.ExpectQuery("SELECT fingerprint, any").
						WithArgs("test_metric", "__name__", "test_metric").
						WillReturnRows(fingerprintRows)

					// Mock the samples query (for Prometheus metric data)
					mock.ExpectQuery("SELECT metric_name, fingerprint, unix_milli").
						WithArgs(
							"test_metric",
							"test_metric",
							"__name__",
							"test_metric",
							queryStart,
							queryEnd,
						).
						WillReturnRows(samplesRows)

					// Create Prometheus provider for this test
					promProvider = prometheustest.New(context.Background(), instrumentationtest.New().ToProviderSettings(), prometheus.Config{}, store)
				},
				ManagerOptionsHook: func(opts *rules.ManagerOptions) {
					// Set Prometheus provider for PromQL queries
					if promProvider != nil {
						opts.Prometheus = promProvider
					}
					opts.PrepareTestRuleFunc = TestNotification
				},
			})

			count, apiErr := mgr.TestNotification(context.Background(), orgID, string(ruleBytes))
			if apiErr != nil {
				t.Logf("TestNotification error: %v, type: %s", apiErr.Err, apiErr.Typ)
			}
			require.Nil(t, apiErr)
			assert.Equal(t, tc.ExpectAlerts, count)

			if tc.ExpectAlerts > 0 {
				// check if the alert has been triggered
				require.Len(t, triggeredTestAlerts, 1)
				var gotAlerts []*alertmanagertypes.PostableAlert
				for a := range triggeredTestAlerts[0] {
					gotAlerts = append(gotAlerts, a)
				}
				require.Len(t, gotAlerts, tc.ExpectAlerts)
				// check if the alert has triggered with correct threshold value
				if tc.ExpectValue != 0 && !math.IsNaN(tc.ExpectValue) && !math.IsInf(tc.ExpectValue, 0) {
					assert.Equal(t, strconv.FormatFloat(tc.ExpectValue, 'f', -1, 64), gotAlerts[0].Annotations["value"])
				}
			} else {
				// check if no alerts have been triggered
				assert.Empty(t, triggeredTestAlerts)
			}

			promProvider.Close()
		})
	}
}

@@ -30,6 +30,8 @@ func (formatter Formatter) DataTypeOf(dataType string) sqlschema.DataType {
		return sqlschema.DataTypeBoolean
	case "VARCHAR", "CHARACTER VARYING", "CHARACTER":
		return sqlschema.DataTypeText
	case "BYTEA":
		return sqlschema.DataTypeBytea
	}

	return formatter.Formatter.DataTypeOf(dataType)

@@ -2,6 +2,7 @@ package postgressqlschema

import (
	"context"
	"database/sql"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/sqlschema"
@@ -47,50 +48,45 @@ func (provider *provider) Operator() sqlschema.SQLOperator {
}

func (provider *provider) GetTable(ctx context.Context, tableName sqlschema.TableName) (*sqlschema.Table, []*sqlschema.UniqueConstraint, error) {
	rows, err := provider.
	columns := []struct {
		ColumnName  string  `bun:"column_name"`
		Nullable    bool    `bun:"nullable"`
		SQLDataType string  `bun:"udt_name"`
		DefaultVal  *string `bun:"column_default"`
	}{}

	err := provider.
		sqlstore.
		BunDB().
		QueryContext(ctx, `
		NewRaw(`
SELECT
	c.column_name,
	c.is_nullable = 'YES',
	c.is_nullable = 'YES' as nullable,
	c.udt_name,
	c.column_default
FROM
	information_schema.columns AS c
WHERE
	c.table_name = ?`, string(tableName))
	c.table_name = ?`, string(tableName)).
		Scan(ctx, &columns)
	if err != nil {
		return nil, nil, err
	}
	if len(columns) == 0 {
		return nil, nil, sql.ErrNoRows
	}

	defer func() {
		if err := rows.Close(); err != nil {
			provider.settings.Logger().ErrorContext(ctx, "error closing rows", "error", err)
		}
	}()

	columns := make([]*sqlschema.Column, 0)
	for rows.Next() {
		var (
			name        string
			sqlDataType string
			nullable    bool
			defaultVal  *string
		)
		if err := rows.Scan(&name, &nullable, &sqlDataType, &defaultVal); err != nil {
			return nil, nil, err
		}

	sqlschemaColumns := make([]*sqlschema.Column, 0)
	for _, column := range columns {
		columnDefault := ""
		if defaultVal != nil {
			columnDefault = *defaultVal
		if column.DefaultVal != nil {
			columnDefault = *column.DefaultVal
		}

		columns = append(columns, &sqlschema.Column{
			Name:     sqlschema.ColumnName(name),
			Nullable: nullable,
			DataType: provider.fmter.DataTypeOf(sqlDataType),
		sqlschemaColumns = append(sqlschemaColumns, &sqlschema.Column{
			Name:     sqlschema.ColumnName(column.ColumnName),
			Nullable: column.Nullable,
			DataType: provider.fmter.DataTypeOf(column.SQLDataType),
			Default:  columnDefault,
		})
	}
@@ -208,7 +204,7 @@ WHERE

	return &sqlschema.Table{
		Name:                  tableName,
		Columns:               columns,
		Columns:               sqlschemaColumns,
		PrimaryKeyConstraint:  primaryKeyConstraint,
		ForeignKeyConstraints: foreignKeyConstraints,
	}, uniqueConstraints, nil

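For readers unfamiliar with bun, the GetTable refactor above swaps a manual rows.Next()/rows.Scan loop for NewRaw(...).Scan(ctx, &slice), which maps result columns onto `bun:"..."`-tagged struct fields in one call. A minimal self-contained sketch of that pattern (uses an in-memory SQLite database and a placeholder table purely for illustration, not the SigNoz schema):

    package main

    import (
        "context"
        "database/sql"
        "fmt"

        "github.com/uptrace/bun"
        "github.com/uptrace/bun/dialect/sqlitedialect"
        "github.com/uptrace/bun/driver/sqliteshim"
    )

    func main() {
        sqldb, err := sql.Open(sqliteshim.ShimName, "file::memory:?cache=shared")
        if err != nil {
            panic(err)
        }
        db := bun.NewDB(sqldb, sqlitedialect.New())
        ctx := context.Background()

        // Placeholder schema/data so the raw query below has something to scan.
        if _, err := db.ExecContext(ctx, `CREATE TABLE t (name TEXT, n INT)`); err != nil {
            panic(err)
        }
        if _, err := db.ExecContext(ctx, `INSERT INTO t VALUES ('a', 1), ('b', 2)`); err != nil {
            panic(err)
        }

        // The pattern the refactor adopts: bind columns to struct fields via
        // bun tags and let Scan fill the whole slice, instead of iterating
        // over *sql.Rows and scanning each row by hand.
        rows := []struct {
            Name string `bun:"name"`
            N    int    `bun:"n"`
        }{}
        if err := db.NewRaw(`SELECT name, n FROM t WHERE n >= ?`, 1).Scan(ctx, &rows); err != nil {
            panic(err)
        }
        fmt.Println(rows)
    }
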
153
ee/sqlstore/postgressqlstore/formatter.go
Normal file
@@ -0,0 +1,153 @@
package postgressqlstore

import (
	"strings"

	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/uptrace/bun/schema"
)

type formatter struct {
	bunf schema.Formatter
}

func newFormatter(dialect schema.Dialect) sqlstore.SQLFormatter {
	return &formatter{bunf: schema.NewFormatter(dialect)}
}

func (f *formatter) JSONExtractString(column, path string) []byte {
	var sql []byte
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, f.convertJSONPathToPostgres(path)...)
	return sql
}

func (f *formatter) JSONType(column, path string) []byte {
	var sql []byte
	sql = append(sql, "jsonb_typeof("...)
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, f.convertJSONPathToPostgresWithMode(path, false)...)
	sql = append(sql, ')')
	return sql
}

func (f *formatter) JSONIsArray(column, path string) []byte {
	var sql []byte
	sql = append(sql, f.JSONType(column, path)...)
	sql = append(sql, " = "...)
	sql = schema.Append(f.bunf, sql, "array")
	return sql
}

func (f *formatter) JSONArrayElements(column, path, alias string) ([]byte, []byte) {
	var sql []byte
	sql = append(sql, "jsonb_array_elements("...)
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, f.convertJSONPathToPostgresWithMode(path, false)...)
	sql = append(sql, ") AS "...)
	sql = f.bunf.AppendIdent(sql, alias)

	return sql, []byte(alias)
}

func (f *formatter) JSONArrayOfStrings(column, path, alias string) ([]byte, []byte) {
	var sql []byte
	sql = append(sql, "jsonb_array_elements_text("...)
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, f.convertJSONPathToPostgresWithMode(path, false)...)
	sql = append(sql, ") AS "...)
	sql = f.bunf.AppendIdent(sql, alias)

	return sql, append([]byte(alias), "::text"...)
}

func (f *formatter) JSONKeys(column, path, alias string) ([]byte, []byte) {
	var sql []byte
	sql = append(sql, "jsonb_each("...)
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, f.convertJSONPathToPostgresWithMode(path, false)...)
	sql = append(sql, ") AS "...)
	sql = f.bunf.AppendIdent(sql, alias)

	return sql, append([]byte(alias), ".key"...)
}

func (f *formatter) JSONArrayAgg(expression string) []byte {
	var sql []byte
	sql = append(sql, "jsonb_agg("...)
	sql = append(sql, expression...)
	sql = append(sql, ')')
	return sql
}

func (f *formatter) JSONArrayLiteral(values ...string) []byte {
	var sql []byte
	sql = append(sql, "jsonb_build_array("...)
	for idx, value := range values {
		if idx > 0 {
			sql = append(sql, ", "...)
		}
		sql = schema.Append(f.bunf, sql, value)
	}
	sql = append(sql, ')')
	return sql
}

func (f *formatter) TextToJsonColumn(column string) []byte {
	var sql []byte
	sql = f.bunf.AppendIdent(sql, column)
	sql = append(sql, "::jsonb"...)
	return sql
}

func (f *formatter) convertJSONPathToPostgres(jsonPath string) []byte {
	return f.convertJSONPathToPostgresWithMode(jsonPath, true)
}

func (f *formatter) convertJSONPathToPostgresWithMode(jsonPath string, asText bool) []byte {
	path := strings.TrimPrefix(strings.TrimPrefix(jsonPath, "$"), ".")

	if path == "" {
		return nil
	}

	parts := strings.Split(path, ".")

	var validParts []string
	for _, part := range parts {
		if part != "" {
			validParts = append(validParts, part)
		}
	}

	if len(validParts) == 0 {
		return nil
	}

	var result []byte

	for idx, part := range validParts {
		if idx == len(validParts)-1 {
			if asText {
				result = append(result, "->>"...)
			} else {
				result = append(result, "->"...)
			}
			result = schema.Append(f.bunf, result, part)
			return result
		}

		result = append(result, "->"...)
		result = schema.Append(f.bunf, result, part)
	}

	return result
}

func (f *formatter) LowerExpression(expression string) []byte {
	var sql []byte
	sql = append(sql, "lower("...)
	sql = append(sql, expression...)
	sql = append(sql, ')')
	return sql
}

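The path-conversion helpers above turn a JSONPath such as $.user.name into chained Postgres accessors: -> for every intermediate key, then ->> (text) or -> (jsonb) for the final hop, with identifiers quoted by bun's formatter. A small illustration of the fragments they produce; this would have to live inside the package, since newFormatter is unexported, and the expected strings are taken from the tests below:

    package postgressqlstore

    import (
        "fmt"

        "github.com/uptrace/bun/dialect/pgdialect"
    )

    // exampleFormatterFragments prints a few of the SQL fragments the helpers
    // above generate; expected outputs match formatter_test.go.
    func exampleFormatterFragments() {
        f := newFormatter(pgdialect.New())
        fmt.Println(string(f.JSONExtractString("data", "$.user.name"))) // "data"->'user'->>'name'
        fmt.Println(string(f.JSONType("data", "$.field")))              // jsonb_typeof("data"->'field')
        fmt.Println(string(f.JSONIsArray("data", "$.items")))           // jsonb_typeof("data"->'items') = 'array'
    }
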
500
ee/sqlstore/postgressqlstore/formatter_test.go
Normal file
@@ -0,0 +1,500 @@
package postgressqlstore

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/uptrace/bun/dialect/pgdialect"
)

func TestJSONExtractString(t *testing.T) {
	tests := []struct {
		name     string
		column   string
		path     string
		expected string
	}{
		{
			name:     "simple path",
			column:   "data",
			path:     "$.field",
			expected: `"data"->>'field'`,
		},
		{
			name:     "nested path",
			column:   "metadata",
			path:     "$.user.name",
			expected: `"metadata"->'user'->>'name'`,
		},
		{
			name:     "deeply nested path",
			column:   "json_col",
			path:     "$.level1.level2.level3",
			expected: `"json_col"->'level1'->'level2'->>'level3'`,
		},
		{
			name:     "root path",
			column:   "json_col",
			path:     "$",
			expected: `"json_col"`,
		},
		{
			name:     "empty path",
			column:   "data",
			path:     "",
			expected: `"data"`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.JSONExtractString(tt.column, tt.path))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestJSONType(t *testing.T) {
	tests := []struct {
		name     string
		column   string
		path     string
		expected string
	}{
		{
			name:     "simple path",
			column:   "data",
			path:     "$.field",
			expected: `jsonb_typeof("data"->'field')`,
		},
		{
			name:     "nested path",
			column:   "metadata",
			path:     "$.user.age",
			expected: `jsonb_typeof("metadata"->'user'->'age')`,
		},
		{
			name:     "root path",
			column:   "json_col",
			path:     "$",
			expected: `jsonb_typeof("json_col")`,
		},
		{
			name:     "empty path",
			column:   "data",
			path:     "",
			expected: `jsonb_typeof("data")`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.JSONType(tt.column, tt.path))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestJSONIsArray(t *testing.T) {
	tests := []struct {
		name     string
		column   string
		path     string
		expected string
	}{
		{
			name:     "simple path",
			column:   "data",
			path:     "$.items",
			expected: `jsonb_typeof("data"->'items') = 'array'`,
		},
		{
			name:     "nested path",
			column:   "metadata",
			path:     "$.user.tags",
			expected: `jsonb_typeof("metadata"->'user'->'tags') = 'array'`,
		},
		{
			name:     "root path",
			column:   "json_col",
			path:     "$",
			expected: `jsonb_typeof("json_col") = 'array'`,
		},
		{
			name:     "empty path",
			column:   "data",
			path:     "",
			expected: `jsonb_typeof("data") = 'array'`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.JSONIsArray(tt.column, tt.path))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestJSONArrayElements(t *testing.T) {
	tests := []struct {
		name     string
		column   string
		path     string
		alias    string
		expected string
	}{
		{
			name:     "root path with dollar sign",
			column:   "data",
			path:     "$",
			alias:    "elem",
			expected: `jsonb_array_elements("data") AS "elem"`,
		},
		{
			name:     "root path empty",
			column:   "data",
			path:     "",
			alias:    "elem",
			expected: `jsonb_array_elements("data") AS "elem"`,
		},
		{
			name:     "nested path",
			column:   "metadata",
			path:     "$.items",
			alias:    "item",
			expected: `jsonb_array_elements("metadata"->'items') AS "item"`,
		},
		{
			name:     "deeply nested path",
			column:   "json_col",
			path:     "$.user.tags",
			alias:    "tag",
			expected: `jsonb_array_elements("json_col"->'user'->'tags') AS "tag"`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got, _ := f.JSONArrayElements(tt.column, tt.path, tt.alias)
			assert.Equal(t, tt.expected, string(got))
		})
	}
}

func TestJSONArrayOfStrings(t *testing.T) {
	tests := []struct {
		name     string
		column   string
		path     string
		alias    string
		expected string
	}{
		{
			name:     "root path with dollar sign",
			column:   "data",
			path:     "$",
			alias:    "str",
			expected: `jsonb_array_elements_text("data") AS "str"`,
		},
		{
			name:     "root path empty",
			column:   "data",
			path:     "",
			alias:    "str",
			expected: `jsonb_array_elements_text("data") AS "str"`,
		},
		{
			name:     "nested path",
			column:   "metadata",
			path:     "$.strings",
			alias:    "s",
			expected: `jsonb_array_elements_text("metadata"->'strings') AS "s"`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got, _ := f.JSONArrayOfStrings(tt.column, tt.path, tt.alias)
			assert.Equal(t, tt.expected, string(got))
		})
	}
}

func TestJSONKeys(t *testing.T) {
	tests := []struct {
		name     string
		column   string
		path     string
		alias    string
		expected string
	}{
		{
			name:     "root path with dollar sign",
			column:   "data",
			path:     "$",
			alias:    "k",
			expected: `jsonb_each("data") AS "k"`,
		},
		{
			name:     "root path empty",
			column:   "data",
			path:     "",
			alias:    "k",
			expected: `jsonb_each("data") AS "k"`,
		},
		{
			name:     "nested path",
			column:   "metadata",
			path:     "$.object",
			alias:    "key",
			expected: `jsonb_each("metadata"->'object') AS "key"`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got, _ := f.JSONKeys(tt.column, tt.path, tt.alias)
			assert.Equal(t, tt.expected, string(got))
		})
	}
}

func TestJSONArrayAgg(t *testing.T) {
	tests := []struct {
		name       string
		expression string
		expected   string
	}{
		{
			name:       "simple column",
			expression: "id",
			expected:   "jsonb_agg(id)",
		},
		{
			name:       "expression with function",
			expression: "DISTINCT name",
			expected:   "jsonb_agg(DISTINCT name)",
		},
		{
			name:       "complex expression",
			expression: "data->>'field'",
			expected:   "jsonb_agg(data->>'field')",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.JSONArrayAgg(tt.expression))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestJSONArrayLiteral(t *testing.T) {
	tests := []struct {
		name     string
		values   []string
		expected string
	}{
		{
			name:     "empty array",
			values:   []string{},
			expected: "jsonb_build_array()",
		},
		{
			name:     "single value",
			values:   []string{"value1"},
			expected: "jsonb_build_array('value1')",
		},
		{
			name:     "multiple values",
			values:   []string{"value1", "value2", "value3"},
			expected: "jsonb_build_array('value1', 'value2', 'value3')",
		},
		{
			name:     "values with special characters",
			values:   []string{"test", "with space", "with-dash"},
			expected: "jsonb_build_array('test', 'with space', 'with-dash')",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.JSONArrayLiteral(tt.values...))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestConvertJSONPathToPostgresWithMode(t *testing.T) {
	tests := []struct {
		name     string
		jsonPath string
		asText   bool
		expected string
	}{
		{
			name:     "simple path as text",
			jsonPath: "$.field",
			asText:   true,
			expected: "->>'field'",
		},
		{
			name:     "simple path as json",
			jsonPath: "$.field",
			asText:   false,
			expected: "->'field'",
		},
		{
			name:     "nested path as text",
			jsonPath: "$.user.name",
			asText:   true,
			expected: "->'user'->>'name'",
		},
		{
			name:     "nested path as json",
			jsonPath: "$.user.name",
			asText:   false,
			expected: "->'user'->'name'",
		},
		{
			name:     "deeply nested as text",
			jsonPath: "$.a.b.c.d",
			asText:   true,
			expected: "->'a'->'b'->'c'->>'d'",
		},
		{
			name:     "root path",
			jsonPath: "$",
			asText:   true,
			expected: "",
		},
		{
			name:     "empty path",
			jsonPath: "",
			asText:   true,
			expected: "",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New()).(*formatter)
			got := string(f.convertJSONPathToPostgresWithMode(tt.jsonPath, tt.asText))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestTextToJsonColumn(t *testing.T) {
	tests := []struct {
		name     string
		column   string
		expected string
	}{
		{
			name:     "simple column name",
			column:   "data",
			expected: `"data"::jsonb`,
		},
		{
			name:     "column with underscore",
			column:   "user_data",
			expected: `"user_data"::jsonb`,
		},
		{
			name:     "column with special characters",
			column:   "json-col",
			expected: `"json-col"::jsonb`,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.TextToJsonColumn(tt.column))
			assert.Equal(t, tt.expected, got)
		})
	}
}

func TestLowerExpression(t *testing.T) {
	tests := []struct {
		name     string
		expr     string
		expected string
	}{
		{
			name:     "simple column name",
			expr:     "name",
			expected: "lower(name)",
		},
		{
			name:     "quoted column identifier",
			expr:     `"column_name"`,
			expected: `lower("column_name")`,
		},
		{
			name:     "jsonb text extraction",
			expr:     "data->>'field'",
			expected: "lower(data->>'field')",
		},
		{
			name:     "nested jsonb extraction",
			expr:     "metadata->'user'->>'name'",
			expected: "lower(metadata->'user'->>'name')",
		},
		{
			name:     "jsonb_typeof expression",
			expr:     "jsonb_typeof(data->'field')",
			expected: "lower(jsonb_typeof(data->'field'))",
		},
		{
			name:     "string concatenation",
			expr:     "first_name || ' ' || last_name",
			expected: "lower(first_name || ' ' || last_name)",
		},
		{
			name:     "CAST expression",
			expr:     "CAST(value AS TEXT)",
			expected: "lower(CAST(value AS TEXT))",
		},
		{
			name:     "COALESCE expression",
			expr:     "COALESCE(name, 'default')",
			expected: "lower(COALESCE(name, 'default'))",
		},
		{
			name:     "subquery column",
			expr:     "users.email",
			expected: "lower(users.email)",
		},
		{
			name:     "quoted identifier with special chars",
			expr:     `"user-name"`,
			expected: `lower("user-name")`,
		},
		{
			name:     "jsonb to text cast",
			expr:     "data::text",
			expected: "lower(data::text)",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			f := newFormatter(pgdialect.New())
			got := string(f.LowerExpression(tt.expr))
			assert.Equal(t, tt.expected, got)
		})
	}
}

@@ -15,10 +15,11 @@ import (
)

type provider struct {
	settings factory.ScopedProviderSettings
	sqldb    *sql.DB
	bundb    *sqlstore.BunDB
	dialect  *dialect
	settings  factory.ScopedProviderSettings
	sqldb     *sql.DB
	bundb     *sqlstore.BunDB
	dialect   *dialect
	formatter sqlstore.SQLFormatter
}

func NewFactory(hookFactories ...factory.ProviderFactory[sqlstore.SQLStoreHook, sqlstore.Config]) factory.ProviderFactory[sqlstore.SQLStore, sqlstore.Config] {
@@ -55,11 +56,14 @@ func New(ctx context.Context, providerSettings factory.ProviderSettings, config

	sqldb := stdlib.OpenDBFromPool(pool)

	pgDialect := pgdialect.New()
	bunDB := sqlstore.NewBunDB(settings, sqldb, pgDialect, hooks)
	return &provider{
		settings: settings,
		sqldb:    sqldb,
		bundb:    sqlstore.NewBunDB(settings, sqldb, pgdialect.New(), hooks),
		dialect:  new(dialect),
		settings:  settings,
		sqldb:     sqldb,
		bundb:     bunDB,
		dialect:   new(dialect),
		formatter: newFormatter(bunDB.Dialect()),
	}, nil
}

@@ -75,6 +79,10 @@ func (provider *provider) Dialect() sqlstore.SQLDialect {
	return provider.dialect
}

func (provider *provider) Formatter() sqlstore.SQLFormatter {
	return provider.formatter
}

func (provider *provider) BunDBCtx(ctx context.Context) bun.IDB {
	return provider.bundb.BunDBCtx(ctx)
}

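Assuming the sqlstore.SQLStore interface now exposes the Formatter() accessor added here (the hunk only shows the provider method), callers can render dialect-specific SQL fragments without touching the dialect directly. A hypothetical sketch; the severityOf helper and the labels column are invented for illustration:

    package example

    import "github.com/SigNoz/signoz/pkg/sqlstore"

    // severityOf is a hypothetical helper: given a store built from the
    // postgres provider above, it renders a JSON-extraction fragment for a
    // "labels" column. On Postgres this comes out as "labels"->>'severity'.
    func severityOf(store sqlstore.SQLStore) string {
        return string(store.Formatter().JSONExtractString("labels", "$.severity"))
    }
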
@@ -1,10 +1,10 @@
package zeus

import (
	"fmt"
	neturl "net/url"
	"sync"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/zeus"
)

@@ -24,17 +24,17 @@ func Config() zeus.Config {
	once.Do(func() {
		parsedURL, err := neturl.Parse(url)
		if err != nil {
			panic(fmt.Errorf("invalid zeus URL: %w", err))
			panic(errors.WrapInternalf(err, errors.CodeInternal, "invalid zeus URL"))
		}

		deprecatedParsedURL, err := neturl.Parse(deprecatedURL)
		if err != nil {
			panic(fmt.Errorf("invalid zeus deprecated URL: %w", err))
			panic(errors.WrapInternalf(err, errors.CodeInternal, "invalid zeus deprecated URL"))
		}

		config = zeus.Config{URL: parsedURL, DeprecatedURL: deprecatedParsedURL}
		if err := config.Validate(); err != nil {
			panic(fmt.Errorf("invalid zeus config: %w", err))
			panic(errors.WrapInternalf(err, errors.CodeInternal, "invalid zeus config"))
		}
	})

@@ -2,4 +2,6 @@ node_modules
build
*.typegen.ts
i18-generate-hash.js
src/parser/TraceOperatorParser/**
src/parser/TraceOperatorParser/**

orval.config.ts

@@ -1,5 +1,8 @@
/**
 * ESLint Configuration for SigNoz Frontend
 */
module.exports = {
	ignorePatterns: ['src/parser/*.ts'],
	ignorePatterns: ['src/parser/*.ts', 'scripts/update-registry.js'],
	env: {
		browser: true,
		es2021: true,
@@ -7,16 +10,12 @@ module.exports = {
		'jest/globals': true,
	},
	extends: [
		'airbnb',
		'airbnb-typescript',
		'eslint:recommended',
		'plugin:react/recommended',
		'plugin:@typescript-eslint/recommended',
		'plugin:@typescript-eslint/eslint-recommended',
		'plugin:react-hooks/recommended',
		'plugin:prettier/recommended',
		'plugin:sonarjs/recommended',
		'plugin:import/errors',
		'plugin:import/warnings',
		'plugin:react/jsx-runtime',
	],
	parser: '@typescript-eslint/parser',
@@ -25,16 +24,21 @@ module.exports = {
		ecmaFeatures: {
			jsx: true,
		},
		ecmaVersion: 12,
		ecmaVersion: 2021,
		sourceType: 'module',
	},
	plugins: [
		'react',
		'@typescript-eslint',
		'simple-import-sort',
		'react-hooks',
		'prettier',
		'jest',
		'react', // React-specific rules
		'@typescript-eslint', // TypeScript linting
		'simple-import-sort', // Auto-sort imports
		'react-hooks', // React Hooks rules
		'prettier', // Code formatting
		'jest', // Jest test rules
		'jsx-a11y', // Accessibility rules
		'import', // Import/export linting
		'sonarjs', // Code quality/complexity
		// TODO: Uncomment after running: yarn add -D eslint-plugin-spellcheck
		// 'spellcheck', // Correct spellings
	],
	settings: {
		react: {
@@ -48,77 +52,110 @@ module.exports = {
		},
	},
	rules: {
		// Code quality rules
		'prefer-const': 'error', // Enforces const for variables never reassigned
		'no-var': 'error', // Disallows var, enforces let/const
		'no-else-return': ['error', { allowElseIf: false }], // Reduces nesting by disallowing else after return
		'no-cond-assign': 'error', // Prevents accidental assignment in conditions (if (x = 1) instead of if (x === 1))
		'no-debugger': 'error', // Disallows debugger statements in production code
		curly: 'error', // Requires curly braces for all control statements
		eqeqeq: ['error', 'always', { null: 'ignore' }], // Enforces === and !== (allows == null for null/undefined check)
		'no-console': ['error', { allow: ['warn', 'error'] }], // Warns on console.log, allows console.warn/error

		// TypeScript rules
		'@typescript-eslint/explicit-function-return-type': 'error', // Requires explicit return types on functions
		'@typescript-eslint/no-unused-vars': [
			// Disallows unused variables/args
			'error',
			{
				argsIgnorePattern: '^_', // Allows unused args prefixed with _ (e.g., _unusedParam)
				varsIgnorePattern: '^_', // Allows unused vars prefixed with _ (e.g., _unusedVar)
			},
		],
		'@typescript-eslint/no-explicit-any': 'warn', // Warns when using 'any' type (consider upgrading to error)
		// TODO: Change to 'error' after fixing ~80 empty function placeholders in providers/contexts
		'@typescript-eslint/no-empty-function': 'off', // Disallows empty function bodies
		'@typescript-eslint/no-var-requires': 'error', // Disallows require() in TypeScript (use import instead)
		'@typescript-eslint/ban-ts-comment': 'off', // Allows @ts-ignore comments (sometimes needed for third-party libs)
		'no-empty-function': 'off', // Disabled in favor of TypeScript version above

		// React rules
		'react/jsx-filename-extension': [
			'error',
			{
				extensions: ['.tsx', '.js', '.jsx'],
				extensions: ['.tsx', '.jsx'], // Warns if JSX is used in non-.jsx/.tsx files
			},
		],
		'react/prop-types': 'off',
		'@typescript-eslint/explicit-function-return-type': 'error',
		'@typescript-eslint/no-var-requires': 'error',
		'react/no-array-index-key': 'error',
		'linebreak-style': [
			'error',
			process.env.platform === 'win32' ? 'windows' : 'unix',
		],
		'@typescript-eslint/default-param-last': 'off',
		'react/prop-types': 'off', // Disabled - using TypeScript instead
		'react/jsx-props-no-spreading': 'off', // Allows {...props} spreading (common in HOCs, forms, wrappers)
		'react/no-array-index-key': 'error', // Prevents using array index as key (causes bugs when list changes)

		// simple sort error
		'simple-import-sort/imports': 'error',
		'simple-import-sort/exports': 'error',

		// hooks
		'react-hooks/rules-of-hooks': 'error',
		'react-hooks/exhaustive-deps': 'error',

		// airbnb
		'no-underscore-dangle': 'off',
		'no-console': 'off',
		'import/prefer-default-export': 'off',
		'import/extensions': [
			'error',
			'ignorePackages',
			{
				js: 'never',
				jsx: 'never',
				ts: 'never',
				tsx: 'never',
			},
		],
		'import/no-extraneous-dependencies': ['error', { devDependencies: true }],
		'no-plusplus': 'off',
		// Accessibility rules
		'jsx-a11y/label-has-associated-control': [
			'error',
			{
				required: {
					some: ['nesting', 'id'],
					some: ['nesting', 'id'], // Labels must either wrap inputs or use htmlFor/id
				},
			},
		],
		'jsx-a11y/label-has-for': [
			'error',
			{
				required: {
					some: ['nesting', 'id'],
				},
			},
		],
		'@typescript-eslint/no-unused-vars': 'error',
		'func-style': ['error', 'declaration', { allowArrowFunctions: true }],
		'arrow-body-style': ['error', 'as-needed'],

		// eslint rules need to remove
		'@typescript-eslint/no-shadow': 'off',
		'import/no-cycle': 'off',
		// https://typescript-eslint.io/rules/consistent-return/ check the warning for details
		'consistent-return': 'off',
		// React Hooks rules
		'react-hooks/rules-of-hooks': 'error', // Enforces Rules of Hooks (only call at top level)
		'react-hooks/exhaustive-deps': 'warn', // Warns about missing dependencies in useEffect/useMemo/useCallback

		// Import/export rules
		'import/extensions': [
			'error',
			'ignorePackages',
			{
				js: 'never', // Disallows .js extension in imports
				jsx: 'never', // Disallows .jsx extension in imports
				ts: 'never', // Disallows .ts extension in imports
				tsx: 'never', // Disallows .tsx extension in imports
			},
		],
		'import/no-extraneous-dependencies': ['error', { devDependencies: true }], // Prevents importing packages not in package.json
		// 'import/no-cycle': 'warn', // TODO: Enable later to detect circular dependencies

		// TODO: Enable in separate PR with auto fixes
		// // Import sorting rules
		// 'simple-import-sort/imports': [
		//   'error',
		//   {
		//     groups: [
		//       ['^react', '^@?\\w'], // React first, then external packages
		//       ['^@/'], // Absolute imports with @ alias
		//       ['^\\u0000'], // Side effect imports (import './file')
		//       ['^\\.'], // Relative imports
		//       ['^.+\\.s?css$'], // Style imports
		//     ],
		//   },
		// ],
		// 'simple-import-sort/exports': 'error', // Auto-sorts exports

		// Prettier - code formatting
		'prettier/prettier': [
			'error',
			{},
			{
				usePrettierrc: true,
				usePrettierrc: true, // Uses .prettierrc.json for formatting rules
			},
		],

		// SonarJS - code quality and complexity
		'sonarjs/no-duplicate-string': 'off', // Disabled - can be noisy (enable periodically to check)
	},
	overrides: [
		{
			files: ['src/api/generated/**/*.ts'],
			rules: {
				'@typescript-eslint/explicit-function-return-type': 'off',
				'@typescript-eslint/explicit-module-boundary-types': 'off',
				'no-nested-ternary': 'off',
				'@typescript-eslint/no-unused-vars': 'warn',
				'sonarjs/no-duplicate-string': 'off',
			},
		},
	],
};

@@ -4,5 +4,14 @@
	"tabWidth": 1,
	"singleQuote": true,
	"jsxSingleQuote": false,
	"semi": true
	"semi": true,
	"printWidth": 80,
	"bracketSpacing": true,
	"jsxBracketSameLine": false,
	"arrowParens": "always",
	"endOfLine": "lf",
	"quoteProps": "as-needed",
	"proseWrap": "preserve",
	"htmlWhitespaceSensitivity": "css",
	"embeddedLanguageFormatting": "auto"
}

8
frontend/.vscode/settings.json
vendored
Normal file
@@ -0,0 +1,8 @@
{
	"editor.formatOnSave": true,
	"editor.defaultFormatter": "esbenp.prettier-vscode",
	"editor.codeActionsOnSave": {
		"source.fixAll.eslint": "explicit"
	},
	"prettier.requireConfig": true
}
@@ -34,12 +34,18 @@ Embrace the spirit of collaboration and contribute to the success of our open-so
### Linting and Setup

- It is crucial to refrain from disabling ESLint and TypeScript errors within the project. If there is a specific rule that needs to be disabled, provide a clear and justified explanation for doing so. Maintaining the integrity of the linting and type-checking processes ensures code quality and consistency throughout the codebase.
- In our project, we rely on several essential ESLint plugins, namely:
  - [plugin:@typescript-eslint](https://typescript-eslint.io/rules/)
  - [airbnb styleguide](https://github.com/airbnb/javascript)
  - [plugin:sonarjs](https://github.com/SonarSource/eslint-plugin-sonarjs)
- In our project, we rely on several essential ESLint plugins and configurations:

To ensure compliance with our coding standards and best practices, we encourage you to refer to the documentation of these plugins. Familiarizing yourself with the ESLint rules they provide will help maintain code quality and consistency throughout the project.
  - [eslint:recommended](https://eslint.org/docs/latest/rules/) - Core ESLint rules for JavaScript best practices
  - [plugin:@typescript-eslint](https://typescript-eslint.io/rules/) - TypeScript-specific linting rules
  - [plugin:react](https://github.com/jsx-eslint/eslint-plugin-react) - React best practices and patterns
  - [plugin:react-hooks](https://www.npmjs.com/package/eslint-plugin-react-hooks) - Rules of Hooks enforcement
  - [plugin:sonarjs](https://github.com/SonarSource/eslint-plugin-sonarjs) - Code quality and complexity analysis
  - [plugin:prettier](https://github.com/prettier/eslint-plugin-prettier) - Code formatting via Prettier
  - [simple-import-sort](https://github.com/lydell/eslint-plugin-simple-import-sort) - Automatic import organization
  - [plugin:jsx-a11y](https://github.com/jsx-eslint/eslint-plugin-jsx-a11y) - Accessibility rules for JSX elements

To ensure compliance with our coding standards and best practices, we encourage you to refer to the documentation of these plugins. Familiarizing yourself with the ESLint rules they provide will help maintain code quality and consistency throughout the project.

### Naming Conventions

@@ -3,5 +3,6 @@ BUNDLE_ANALYSER="true"
FRONTEND_API_ENDPOINT="http://localhost:8080/"
PYLON_APP_ID="pylon-app-id"
APPCUES_APP_ID="appcess-app-id"
PYLON_IDENTITY_SECRET="pylon-identity-secret"

CI="1"
97
frontend/orval.config.ts
Normal file
@@ -0,0 +1,97 @@
/**
 * When making changes to this file, remove the file name from .eslintignore and tsconfig.json.
 * This is required because moduleResolution is set to "node"; changing that is a more detailed effort.
 * So, until then, we will keep this file ignored for ESLint and TypeScript.
 */

import { defineConfig } from 'orval';

export default defineConfig({
	signoz: {
		input: {
			target: '../docs/api/openapi.yml',
		},
		output: {
			target: './src/api/generated/services',
			client: 'react-query',
			httpClient: 'axios',
			mode: 'tags-split',
			prettier: true,
			headers: true,
			clean: true,
			override: {
				query: {
					useQuery: true,
					useMutation: true,
					useInvalidate: true,
					signal: true,
					useOperationIdAsQueryKey: true,
				},
				useDates: true,
				useNamedParameters: true,
				enumGenerationType: 'enum',
				mutator: {
					path: './src/api/index.ts',
					name: 'GeneratedAPIInstance',
				},

				jsDoc: {
					filter: (schema) => {
						const allowlist = [
							'type',
							'format',
							'maxLength',
							'minLength',
							'description',
							'minimum',
							'maximum',
							'exclusiveMinimum',
							'exclusiveMaximum',
							'pattern',
							'nullable',
							'enum',
						];
						return Object.entries(schema || {})
							.filter(([key]) => allowlist.includes(key))
							.map(([key, value]: [string, any]) => ({
								key,
								value,
							}))
							.sort((a, b) => a.key.length - b.key.length);
					},
				},

				components: {
					schemas: {
						suffix: 'DTO',
					},
					responses: {
						suffix: 'Response',
					},
					parameters: {
						suffix: 'Params',
					},
					requestBodies: {
						suffix: 'Body',
					},
				},

				// info is of type InfoObject from openapi spec
				header: (info: { title: string; version: string }): string[] => [
					`! Do not edit manually`,
					`* The file has been auto-generated using Orval for SigNoz`,
					`* regenerate with 'yarn generate:api'`,
					...(info.title ? [info.title] : []),
					...(info.version ? [`OpenAPI spec version: ${info.version}`] : []),
				],

				// @ts-expect-error
				// propertySortOrder, urlEncodeParameters, aliasCombinedTypes
				// are valid options in the document without types
				propertySortOrder: 'Alphabetical',
				urlEncodeParameters: true,
				aliasCombinedTypes: true,
			},
		},
	},
});
@@ -14,11 +14,12 @@
	"jest": "jest",
	"jest:coverage": "jest --coverage",
	"jest:watch": "jest --watch",
	"postinstall": "yarn i18n:generate-hash && (is-ci || yarn husky:configure)",
	"postinstall": "yarn i18n:generate-hash && (is-ci || yarn husky:configure) && node scripts/update-registry.js",
	"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
	"commitlint": "commitlint --edit $1",
	"test": "jest",
	"test:changedsince": "jest --changedSince=main --coverage --silent"
	"test:changedsince": "jest --changedSince=main --coverage --silent",
	"generate:api": "orval --config ./orval.config.ts && sh scripts/post-types-generation.sh && prettier --write src/api/generated && (eslint --fix src/api/generated || true)"
},
"engines": {
	"node": ">=16.15.0"
@@ -38,7 +39,7 @@
	"@mdx-js/loader": "2.3.0",
	"@mdx-js/react": "2.3.0",
	"@monaco-editor/react": "^4.3.1",
	"@playwright/test": "1.54.1",
	"@playwright/test": "1.55.1",
	"@radix-ui/react-tabs": "1.0.4",
	"@radix-ui/react-tooltip": "1.0.7",
	"@sentry/react": "8.41.0",
@@ -47,6 +48,9 @@
	"@signozhq/button": "0.0.2",
	"@signozhq/calendar": "0.0.0",
	"@signozhq/callout": "0.0.2",
	"@signozhq/checkbox": "0.0.2",
	"@signozhq/combobox": "0.0.2",
	"@signozhq/command": "0.0.0",
	"@signozhq/design-tokens": "1.1.4",
	"@signozhq/input": "0.0.2",
	"@signozhq/popover": "0.0.0",
@@ -69,7 +73,7 @@
	"antd": "5.11.0",
	"antd-table-saveas-excel": "2.2.1",
	"antlr4": "4.13.2",
	"axios": "1.8.2",
	"axios": "1.12.0",
	"babel-eslint": "^10.1.0",
	"babel-jest": "^29.6.4",
	"babel-loader": "9.1.3",
@@ -83,6 +87,7 @@
	"color": "^4.2.1",
	"color-alpha": "1.1.3",
	"cross-env": "^7.0.3",
	"crypto-js": "4.2.0",
	"css-loader": "5.0.0",
	"css-minimizer-webpack-plugin": "5.0.1",
	"d3-hierarchy": "3.1.2",
@@ -102,7 +107,6 @@
	"i18next-http-backend": "^1.3.2",
	"jest": "^27.5.1",
	"js-base64": "^3.7.2",
	"kbar": "0.1.0-beta.48",
	"less": "^4.1.2",
	"less-loader": "^10.2.0",
	"lodash-es": "^4.17.21",
@@ -112,7 +116,7 @@
	"overlayscrollbars": "^2.8.1",
	"overlayscrollbars-react": "^0.5.6",
	"papaparse": "5.4.1",
	"posthog-js": "1.215.5",
	"posthog-js": "1.298.0",
	"rc-tween-one": "3.0.6",
	"react": "18.2.0",
	"react-addons-update": "15.6.3",
@@ -149,7 +153,6 @@
	"tsconfig-paths-webpack-plugin": "^3.5.1",
	"typescript": "^4.0.5",
	"uplot": "1.6.31",
	"userpilot": "1.3.9",
	"uuid": "^8.3.2",
	"web-vitals": "^0.2.4",
	"webpack": "5.94.0",
@@ -186,6 +189,7 @@
	"@types/color": "^3.0.3",
	"@types/compression-webpack-plugin": "^9.0.0",
	"@types/copy-webpack-plugin": "^8.0.1",
	"@types/crypto-js": "4.2.2",
	"@types/dompurify": "^2.4.0",
	"@types/event-source-polyfill": "^1.0.0",
	"@types/fontfaceobserver": "2.1.0",
@@ -217,16 +221,11 @@
	"compression-webpack-plugin": "9.0.0",
	"copy-webpack-plugin": "^11.0.0",
	"eslint": "^7.32.0",
	"eslint-config-airbnb": "^19.0.4",
	"eslint-config-airbnb-typescript": "^16.1.4",
	"eslint-config-prettier": "^8.3.0",
	"eslint-config-standard": "^16.0.3",
	"eslint-plugin-import": "^2.28.1",
	"eslint-plugin-jest": "^26.9.0",
	"eslint-plugin-jsx-a11y": "^6.5.1",
	"eslint-plugin-node": "^11.1.0",
	"eslint-plugin-prettier": "^4.0.0",
	"eslint-plugin-promise": "^5.1.0",
	"eslint-plugin-react": "^7.24.0",
	"eslint-plugin-react-hooks": "^4.3.0",
	"eslint-plugin-simple-import-sort": "^7.0.0",
@@ -240,6 +239,7 @@
	"lint-staged": "^12.5.0",
	"msw": "1.3.2",
	"npm-run-all": "latest",
	"orval": "7.18.0",
	"portfinder-sync": "^0.0.2",
	"postcss": "8.4.38",
	"prettier": "2.2.1",
@@ -279,6 +279,8 @@
	"prismjs": "1.30.0",
	"got": "11.8.5",
	"form-data": "4.0.4",
	"brace-expansion": "^2.0.2"
	"brace-expansion": "^2.0.2",
	"on-headers": "^1.1.0",
	"tmp": "0.2.4"
}
}

3
frontend/public/Logos/agno.svg
Normal file
@@ -0,0 +1,3 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100">
<image width="100" height="100" href="data:image/webp;base64,UklGRqQEAABXRUJQVlA4IJgEAACQJQCdASrhAOEAPm02mUikIyKhJVZ4CIANiWVu4XPw+dx/zz8Y9kq6J+Kv5T8nN4H6MM2Hod6xffvuy+jnoX8wD9Zekj5gP2S/YDsAegB+ovWUeg9/GfPP/cn4dP2/9JnVbPMU2BWP/6ShYsB/INjyXNw88GLWEjqTk7r3B1Jyd0Q60mjXFTuiHUwvdoU9QiTOwXp8O6KdYSPcXtgV/YIjrUu+9aTZYQwhSBOiHuLpesMKhiEeAz91s6AFmAINNYJeBpyuf9KScX17NiZWEjlmJ+PsBWx4Be/O9Vng220j2g97IwDP4CHt7ScFK+ikuWNp4z1G60XIv5OXis+Sp/e/QkOjbE0qKut3VrL3hQfSVuuLp2XQBvxPw8eyiMbE05kQ6k5O6Ml23DSy92Ix+s9tw0svcgAA/Jba8JQ+0uNQuBewKWqirnyDRkjzUXXOeoNxj0dA3QpklBe8QAAMu8geavhwzfTJ2OUqOrwpg8PXz6sktEQLDnTlNclju/V9f4Ny3CgDYwBvljjNaFA8PR18BGAyzmVuHsa0hgskCkgbT6nZi/Vr9AXTc/UZtUS8Z3pKg4hMjWCXdMx0JCsu6v+8ghZmHf/y0VcGbDiugOy3WH36R3zSOIJh1NjhcNYlKfWilZD5ohEfCaTn/D4u3kgC/kLeLF06X5oe/zKySrWWEPQe965rTf4pcEGBZQGkauwtkLPJWVcHoNzJshb6gdVqs0hye7fI5nFTsbJaZmqz8kXeMSEV4zhApVaDyl3Q2P/ATfChzCP87kOBxeVrhSkNkMWS/e1j58dVzpyTubHOt04Z2NKpTlEjBganrkKIYvtuYWGoJWEmtMVtM9TuzNWa3jKX3kWc9zWmQHHawveaHnJn4HJXQwbTC4ZIlxa5el1b8GgqUXzOeF9LIWA3eb7i34/Q3GXZcbs5DLsukl03KclLo5hW5uf04JjGQR5NXHd8as68c3K9Kze3147QuIn1ytP7uE9us639xDuAiHDPr9JiVn62mv80Q2ErNLD+d6VYdhm6J2PCLY5/9mHTExTnIBzRtNoErP6qmMiCXmSM4sX7s14u3yhaE+dVjX1xtxUWtFjH0GWCN/1JNmjtElHq+FjqPWse7tIMvkKm7xFBgoM8yUJGxY+HviWNkOHipLPh7RJ3KehnEZTPIhR5GHlhlATLx+QUQ61+WwhffiT45KPRiK9v8Cd+1YShd6ZGqFSqsxt4FuMSvpMUB+Rd51pUUPuwXHzpXcyz6BXkWw0Mi3jbfH3kxet2Bsqzu0XWmJ2Oj097qP4ayirQzJewKHo8LS0cFG73Lvox2tmD7ZhuRRVmGjyz62sUMVDSzIUN2BHghed2IdUHVnAkof5DrdZIx+umeHp2XSityHdbTywo7r1+ve5+vPk8hhxhISBjbK+qikfPVnhJ8R/L7KUUqchh3AGKVmRGVgXh09MDaEiW/qqPYzP+MBiKfM5eTcHuGLymyFvT2em/o58zIcp7nKtDr90BsurnuOA0XaH3ZPDpSFAUN3HL9Zj7mpQtcFMfpCaC1VMv0l2UvY8xo/6qYAG6UEbsogAAAAA=" />
</svg>
After Width: | Height: | Size: 1.7 KiB |
1
frontend/public/Logos/amazon-bedrock.svg
Normal file
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" fill="currentColor" fill-rule="evenodd" style="flex:none;line-height:1" viewBox="0 0 24 24"><title>AWS</title><path d="M6.763 11.212q.002.446.088.71c.064.176.144.368.256.576.04.063.056.127.056.183q.002.12-.152.24l-.503.335a.4.4 0 0 1-.208.072q-.12-.002-.239-.112a2.5 2.5 0 0 1-.287-.375 6 6 0 0 1-.248-.471q-.934 1.101-2.347 1.101c-.67 0-1.205-.191-1.596-.574-.39-.384-.59-.894-.59-1.533 0-.678.24-1.23.726-1.644.487-.415 1.133-.623 1.955-.623.272 0 .551.024.846.064.296.04.6.104.918.176v-.583q-.001-.908-.375-1.277c-.255-.248-.686-.367-1.3-.367-.28 0-.568.031-.863.103s-.583.16-.862.272a2 2 0 0 1-.28.104.5.5 0 0 1-.127.023q-.168.002-.168-.247v-.391c0-.128.016-.224.056-.28a.6.6 0 0 1 .224-.167 4.6 4.6 0 0 1 1.005-.36 4.8 4.8 0 0 1 1.246-.151c.95 0 1.644.216 2.091.647q.661.646.662 1.963v2.586zm-3.24 1.214c.263 0 .534-.048.822-.144a1.8 1.8 0 0 0 .758-.51 1.3 1.3 0 0 0 .272-.512c.047-.191.08-.423.08-.694v-.335a7 7 0 0 0-.735-.136 6 6 0 0 0-.75-.048c-.535 0-.926.104-1.19.32-.263.215-.39.518-.39.917 0 .375.095.655.295.846.191.2.47.296.838.296m6.41.862c-.144 0-.24-.024-.304-.08-.064-.048-.12-.16-.168-.311L7.586 6.726a1.4 1.4 0 0 1-.072-.32c0-.128.064-.2.191-.2h.783q.227-.001.31.08c.065.048.113.16.16.312l1.342 5.284 1.245-5.284q.058-.24.151-.312a.55.55 0 0 1 .32-.08h.638c.152 0 .256.025.32.08.063.048.12.16.151.312l1.261 5.348 1.381-5.348q.074-.24.16-.312a.52.52 0 0 1 .311-.08h.743c.127 0 .2.065.2.2 0 .04-.009.08-.017.128a1 1 0 0 1-.056.2l-1.923 6.17q-.072.24-.168.311a.5.5 0 0 1-.303.08h-.687c-.15 0-.255-.024-.32-.08-.063-.056-.119-.16-.15-.32L12.32 7.747l-1.23 5.14c-.04.16-.087.264-.15.32-.065.056-.177.08-.32.08zm10.256.215c-.415 0-.83-.048-1.229-.143-.399-.096-.71-.2-.918-.32-.128-.071-.215-.151-.247-.223a.6.6 0 0 1-.048-.224v-.407c0-.167.064-.247.183-.247q.072 0 .144.024c.048.016.12.048.2.08q.408.181.878.279c.32.064.63.096.95.096.502 0 .894-.088 1.165-.264a.86.86 0 0 0 .415-.758.78.78 0 0 0-.215-.559c-.144-.151-.416-.287-.807-.415l-1.157-.36c-.583-.183-1.014-.454-1.277-.813a1.9 1.9 0 0 1-.4-1.158q0-.502.216-.886c.144-.255.335-.479.575-.654.24-.184.51-.32.83-.415.32-.096.655-.136 1.006-.136.175 0 .36.008.535.032.183.024.35.056.518.088q.24.058.455.127.216.072.336.144a.7.7 0 0 1 .24.2.43.43 0 0 1 .071.263v.375q-.002.254-.184.256a.8.8 0 0 1-.303-.096 3.65 3.65 0 0 0-1.532-.311c-.455 0-.815.071-1.062.223s-.375.383-.375.71c0 .224.08.416.24.567.16.152.454.304.877.44l1.134.358c.574.184.99.44 1.237.767s.367.702.367 1.117c0 .343-.072.655-.207.926a2.2 2.2 0 0 1-.583.703c-.248.2-.543.343-.886.447-.36.111-.734.167-1.142.167"/><path fill="#f90" d="M.378 15.475c3.384 1.963 7.56 3.153 11.877 3.153 2.914 0 6.114-.607 9.06-1.852.44-.2.814.287.383.607-2.626 1.94-6.442 2.969-9.722 2.969-4.598 0-8.74-1.7-11.87-4.526-.247-.223-.024-.527.272-.351m23.531-.2c.287.36-.08 2.826-1.485 4.007-.215.184-.423.088-.327-.151l.175-.439c.343-.88.802-2.198.52-2.555-.336-.43-2.22-.207-3.074-.103-.255.032-.295-.192-.063-.36 1.5-1.053 3.967-.75 4.254-.399"/></svg>
After Width: | Height: | Size: 3.0 KiB |
16
frontend/public/Logos/anthropic-api-monitoring.svg
Normal file
@@ -0,0 +1,16 @@
<svg version="1.1" id="Layer_1" xmlns:x="ns_extend;" xmlns:i="ns_ai;" xmlns:graph="ns_graphs;" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 92.2 65" style="enable-background:new 0 0 92.2 65;" xml:space="preserve">
<style type="text/css">
.st0{fill:#FFFFFF;}
</style>
<metadata>
<sfw xmlns="ns_sfw;">
<slices>
</slices>
<sliceSourceBounds bottomLeftOrigin="true" height="65" width="92.2" x="-43.7" y="-98">
</sliceSourceBounds>
</sfw>
</metadata>
<path class="st0" d="M66.5,0H52.4l25.7,65h14.1L66.5,0z M25.7,0L0,65h14.4l5.3-13.6h26.9L51.8,65h14.4L40.5,0C40.5,0,25.7,0,25.7,0z
M24.3,39.3l8.8-22.8l8.8,22.8H24.3z">
</path>
</svg>
After Width: | Height: | Size: 714 B |
1
frontend/public/Logos/autogen.svg
Normal file
After Width: | Height: | Size: 20 KiB |
1
frontend/public/Logos/azure-openai.svg
Normal file
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24"><title>Azure</title><path fill="url(#a)" d="M7.242 1.613A1.11 1.11 0 0 1 8.295.857h6.977L8.03 22.316a1.11 1.11 0 0 1-1.052.755h-5.43a1.11 1.11 0 0 1-1.053-1.466z"/><path fill="#0078d4" d="M18.397 15.296H7.4a.51.51 0 0 0-.347.882l7.066 6.595c.206.192.477.298.758.298h6.226z"/><path fill="url(#b)" d="M15.272.857H7.497L0 23.071h7.775l1.596-4.73 5.068 4.73h6.665l-2.707-7.775h-7.998z"/><path fill="url(#c)" d="M17.193 1.613a1.11 1.11 0 0 0-1.052-.756h-7.81.035c.477 0 .9.304 1.052.756l6.748 19.992a1.11 1.11 0 0 1-1.052 1.466h-.12 7.895a1.11 1.11 0 0 0 1.052-1.466z"/><defs><linearGradient id="a" x1="8.247" x2="1.002" y1="1.626" y2="23.03" gradientUnits="userSpaceOnUse"><stop stop-color="#114a8b"/><stop offset="1" stop-color="#0669bc"/></linearGradient><linearGradient id="b" x1="14.042" x2="12.324" y1="15.302" y2="15.888" gradientUnits="userSpaceOnUse"><stop stop-opacity=".3"/><stop offset=".071" stop-opacity=".2"/><stop offset=".321" stop-opacity=".1"/><stop offset=".623" stop-opacity=".05"/><stop offset="1" stop-opacity="0"/></linearGradient><linearGradient id="c" x1="12.841" x2="20.793" y1="1.626" y2="22.814" gradientUnits="userSpaceOnUse"><stop stop-color="#3ccbf4"/><stop offset="1" stop-color="#2892df"/></linearGradient></defs></svg>
After Width: | Height: | Size: 1.3 KiB |
1
frontend/public/Logos/claude-code.svg
Normal file
@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Claude</title><path d="M4.709 15.955l4.72-2.647.08-.23-.08-.128H9.2l-.79-.048-2.698-.073-2.339-.097-2.266-.122-.571-.121L0 11.784l.055-.352.48-.321.686.06 1.52.103 2.278.158 1.652.097 2.449.255h.389l.055-.157-.134-.098-.103-.097-2.358-1.596-2.552-1.688-1.336-.972-.724-.491-.364-.462-.158-1.008.656-.722.881.06.225.061.893.686 1.908 1.476 2.491 1.833.365.304.145-.103.019-.073-.164-.274-1.355-2.446-1.446-2.49-.644-1.032-.17-.619a2.97 2.97 0 01-.104-.729L6.283.134 6.696 0l.996.134.42.364.62 1.414 1.002 2.229 1.555 3.03.456.898.243.832.091.255h.158V9.01l.128-1.706.237-2.095.23-2.695.08-.76.376-.91.747-.492.584.28.48.685-.067.444-.286 1.851-.559 2.903-.364 1.942h.212l.243-.242.985-1.306 1.652-2.064.73-.82.85-.904.547-.431h1.033l.76 1.129-.34 1.166-1.064 1.347-.881 1.142-1.264 1.7-.79 1.36.073.11.188-.02 2.856-.606 1.543-.28 1.841-.315.833.388.091.395-.328.807-1.969.486-2.309.462-3.439.813-.042.03.049.061 1.549.146.662.036h1.622l3.02.225.79.522.474.638-.079.485-1.215.62-1.64-.389-3.829-.91-1.312-.329h-.182v.11l1.093 1.068 2.006 1.81 2.509 2.33.127.578-.322.455-.34-.049-2.205-1.657-.851-.747-1.926-1.62h-.128v.17l.444.649 2.345 3.521.122 1.08-.17.353-.608.213-.668-.122-1.374-1.925-1.415-2.167-1.143-1.943-.14.08-.674 7.254-.316.37-.729.28-.607-.461-.322-.747.322-1.476.389-1.924.315-1.53.286-1.9.17-.632-.012-.042-.14.018-1.434 1.967-2.18 2.945-1.726 1.845-.414.164-.717-.37.067-.662.401-.589 2.388-3.036 1.44-1.882.93-1.086-.006-.158h-.055L4.132 18.56l-1.13.146-.487-.456.061-.746.231-.243 1.908-1.312-.006.006z" fill="#D97757" fill-rule="nonzero"></path></svg>
After Width: | Height: | Size: 1.7 KiB |
1
frontend/public/Logos/crew-ai.svg
Normal file
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24"><title>CrewAI</title><path fill="#461816" d="M19.41 10.783a2.75 2.75 0 0 1 2.471 1.355c.483.806.622 1.772.385 2.68l-.136.522a10 10 0 0 1-3.156 5.058c-.605.517-1.283 1.062-2.083 1.524l-.028.017c-.402.232-.884.511-1.398.756-1.19.602-2.475.997-3.798 1.167-.854.111-1.716.155-2.577.132h-.018a8.6 8.6 0 0 1-5.046-1.87l-.012-.01-.012-.01A8.02 8.02 0 0 1 1.22 17.42a10.9 10.9 0 0 1-.102-3.779A15.6 15.6 0 0 1 2.88 8.4a21.8 21.8 0 0 1 2.432-3.678 15.4 15.4 0 0 1 3.56-3.182A10 10 0 0 1 12.44.104h.004l.003-.002c2.057-.384 3.743.374 5.024 1.26a8.3 8.3 0 0 1 2.395 2.513l.024.04.023.042a5.47 5.47 0 0 1 .508 4.012c-.239.97-.577 1.914-1.01 2.814z"/><path fill="#fff" d="M18.861 13.165a.748.748 0 0 1 1.256.031c.199.332.256.73.159 1.103l-.137.522a7.94 7.94 0 0 1-2.504 4.014c-.572.49-1.138.939-1.774 1.306-.427.247-.857.496-1.303.707a9.6 9.6 0 0 1-3.155.973 14.3 14.3 0 0 1-2.257.116 6.53 6.53 0 0 1-3.837-1.422 5.97 5.97 0 0 1-2.071-3.494 8.9 8.9 0 0 1-.085-3.08 13.6 13.6 0 0 1 1.54-4.568 19.7 19.7 0 0 1 2.212-3.348 13.4 13.4 0 0 1 3.088-2.76 7.9 7.9 0 0 1 2.832-1.14c1.307-.245 2.434.207 3.481.933a6.2 6.2 0 0 1 1.806 1.892c.423.767.536 1.668.314 2.515a12.4 12.4 0 0 1-.99 2.67l-.223.497q-.48 1.07-.97 2.137a.76.76 0 0 1-.97.467 3.39 3.39 0 0 1-2.283-2.49c-.095-.83.04-1.669.39-2.426.288-.746.61-1.477.933-2.208l.248-.563a.53.53 0 0 0-.204-.742 2.35 2.35 0 0 0-1.2.702 25 25 0 0 0-1.614 1.767 21.6 21.6 0 0 0-2.619 4.184 7.6 7.6 0 0 0-.816 2.753 7 7 0 0 0 .07 2.219 2.055 2.055 0 0 0 1.934 1.715c1.801.1 3.59-.363 5.116-1.328a19 19 0 0 0 1.675-1.294c.752-.71 1.376-1.519 1.958-2.36"/></svg>
After Width: | Height: | Size: 1.7 KiB |
1
frontend/public/Logos/dashboards.svg
Normal file
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="150" height="150" fill="none" viewBox="0 0 150 150"><circle cx="75" cy="75" r="70" fill="#f3f4f6"/><g transform="translate(25 25)"><rect width="60" height="55" fill="#fcd34d" rx="6"/><path stroke="#d97706" stroke-linecap="round" stroke-linejoin="round" stroke-width="4" d="m10 40 15-15 15 8 15-23"/><rect width="35" height="55" x="65" fill="#6ee7b7" rx="6"/><rect width="20" height="6" x="73" y="10" fill="#059669" rx="3"/><rect width="15" height="6" x="73" y="22" fill="#059669" opacity=".7" rx="3"/><rect width="18" height="6" x="73" y="34" fill="#059669" opacity=".7" rx="3"/><rect width="100" height="40" y="60" fill="#a5b4fc" rx="6"/><rect width="80" height="8" x="10" y="70" fill="#4f46e5" rx="4"/><rect width="50" height="8" x="20" y="83" fill="#6366f1" rx="4"/></g></svg>
After Width: | Height: | Size: 826 B |
1
frontend/public/Logos/deepseek.svg
Normal file
@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>DeepSeek</title><path d="M23.748 4.482c-.254-.124-.364.113-.512.234-.051.039-.094.09-.137.136-.372.397-.806.657-1.373.626-.829-.046-1.537.214-2.163.848-.133-.782-.575-1.248-1.247-1.548-.352-.156-.708-.311-.955-.65-.172-.241-.219-.51-.305-.774-.055-.16-.11-.323-.293-.35-.2-.031-.278.136-.356.276-.313.572-.434 1.202-.422 1.84.027 1.436.633 2.58 1.838 3.393.137.093.172.187.129.323-.082.28-.18.552-.266.833-.055.179-.137.217-.329.14a5.526 5.526 0 01-1.736-1.18c-.857-.828-1.631-1.742-2.597-2.458a11.365 11.365 0 00-.689-.471c-.985-.957.13-1.743.388-1.836.27-.098.093-.432-.779-.428-.872.004-1.67.295-2.687.684a3.055 3.055 0 01-.465.137 9.597 9.597 0 00-2.883-.102c-1.885.21-3.39 1.102-4.497 2.623C.082 8.606-.231 10.684.152 12.85c.403 2.284 1.569 4.175 3.36 5.653 1.858 1.533 3.997 2.284 6.438 2.14 1.482-.085 3.133-.284 4.994-1.86.47.234.962.327 1.78.397.63.059 1.236-.03 1.705-.128.735-.156.684-.837.419-.961-2.155-1.004-1.682-.595-2.113-.926 1.096-1.296 2.746-2.642 3.392-7.003.05-.347.007-.565 0-.845-.004-.17.035-.237.23-.256a4.173 4.173 0 001.545-.475c1.396-.763 1.96-2.015 2.093-3.517.02-.23-.004-.467-.247-.588zM11.581 18c-2.089-1.642-3.102-2.183-3.52-2.16-.392.024-.321.471-.235.763.09.288.207.486.371.739.114.167.192.416-.113.603-.673.416-1.842-.14-1.897-.167-1.361-.802-2.5-1.86-3.301-3.307-.774-1.393-1.224-2.887-1.298-4.482-.02-.386.093-.522.477-.592a4.696 4.696 0 011.529-.039c2.132.312 3.946 1.265 5.468 2.774.868.86 1.525 1.887 2.202 2.891.72 1.066 1.494 2.082 2.48 2.914.348.292.625.514.891.677-.802.09-2.14.11-3.054-.614zm1-6.44a.306.306 0 01.415-.287.302.302 0 01.2.288.306.306 0 01-.31.307.303.303 0 01-.304-.308zm3.11 1.596c-.2.081-.399.151-.59.16a1.245 1.245 0 01-.798-.254c-.274-.23-.47-.358-.552-.758a1.73 1.73 0 01.016-.588c.07-.327-.008-.537-.239-.727-.187-.156-.426-.199-.688-.199a.559.559 0 01-.254-.078c-.11-.054-.2-.19-.114-.358.028-.054.16-.186.192-.21.356-.202.767-.136 1.146.016.352.144.618.408 1.001.782.391.451.462.576.685.914.176.265.336.537.445.848.067.195-.019.354-.25.452z" fill="#4D6BFE"></path></svg>
After Width: | Height: | Size: 2.1 KiB |
1
frontend/public/Logos/envoy.svg
Normal file
@@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 128 128"><path fill="#b31aab" d="m33.172 61.48.176 7.325 7.797 4.785-.176-7.324Zm19.031 30.504-.176-7.18-6.84-4.206c-.085-.059-.203-.145-.289-.203l.176 7.207Zm-24.355 9.688L10.012 90.715l-.438-18.367 8.758-3.746-.172-7.352-13.969 5.969c-1.074.46-1.714 1.441-1.687 2.566l.523 22.055c.032 1.125.73 2.25 1.836 2.941l21.383 13.149c.992.605 2.215.78 3.203.46a.8.8 0 0 0 .29-.113l13.124-5.593-7.129-4.383Zm0 0"/><path fill="#d163ce" d="M85.488 61.047c-.031-1.328-.843-2.625-2.125-3.43L57.38 41.672l-.813.344.176 7.726 20.57 12.63.493 20.648 7.86 4.812.433-.172ZM54.383 97.289 30.262 82.47l-.582-24.883 11-4.7-.203-8.562-17.082 7.293c-1.25.547-2.008 1.672-1.977 3l.668 29.18c.031 1.324.844 2.625 2.125 3.402l28.281 17.387c1.164.723 2.563.894 3.754.547.117-.028.234-.086.348-.145l16.703-7.12-8.32-5.102Zm0 0"/><path fill="#e13eaf" d="M122.234 40.633 85.98 18.343c-1.335-.808-2.937-1.038-4.304-.605-.145.028-.262.086-.406.145l-35.383 15.11c-1.426.605-2.297 1.902-2.27 3.429l.903 37.371c.03 1.527.96 2.996 2.445 3.89l36.254 22.262c1.336.805 2.937 1.035 4.277.606.145-.031.262-.09.406-.145l35.383-15.11c1.426-.605 2.297-1.933 2.27-3.433l-.875-37.367c-.028-1.473-.961-2.969-2.446-3.863M85.398 91.64 53.891 72.293l-.79-32.496 30.727-13.121 31.512 19.347.785 32.497Zm0 0"/></svg>
After Width: | Height: | Size: 1.3 KiB |
1
frontend/public/Logos/fly-io.svg
Normal file
After Width: | Height: | Size: 6.0 KiB |
1
frontend/public/Logos/google-gemini.svg
Normal file
@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Gemini</title><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="#3186FF"></path><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="url(#lobe-icons-gemini-fill-0)"></path><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="url(#lobe-icons-gemini-fill-1)"></path><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="url(#lobe-icons-gemini-fill-2)"></path><defs><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-gemini-fill-0" x1="7" x2="11" y1="15.5" y2="12"><stop stop-color="#08B962"></stop><stop offset="1" stop-color="#08B962" stop-opacity="0"></stop></linearGradient><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-gemini-fill-1" x1="8" x2="11.5" y1="5.5" y2="11"><stop stop-color="#F94543"></stop><stop offset="1" stop-color="#F94543" stop-opacity="0"></stop></linearGradient><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-gemini-fill-2" x1="3.5" x2="17.5" y1="13.5" y2="12"><stop stop-color="#FABC12"></stop><stop offset=".46" stop-color="#FABC12" stop-opacity="0"></stop></linearGradient></defs></svg>
After Width: | Height: | Size: 2.8 KiB |
7
frontend/public/Logos/grok.svg
Normal file
After Width: | Height: | Size: 14 KiB |
1
frontend/public/Logos/honeycomb.svg
Normal file
After Width: | Height: | Size: 5.5 KiB |
4
frontend/public/Logos/inkeep.svg
Normal file
@@ -0,0 +1,4 @@
<svg width="361" height="224" viewBox="0 0 361 224" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M67.3465 4.3208C62.5234 4.3208 58.0667 6.89389 55.6552 11.0708L1.80865 104.336C-0.602889 108.513 -0.602883 113.659 1.80866 117.836L55.6552 211.101C58.0667 215.277 62.5234 217.851 67.3465 217.851H175.039C179.863 217.851 184.319 215.277 186.731 211.101L240.577 117.836C242.989 113.659 242.989 108.513 240.577 104.336L186.731 11.0708C184.319 6.89388 179.863 4.3208 175.039 4.3208L67.3465 4.3208Z" fill="#D5E5FF"/>
<path d="M287.008 2.29488C319.287 10.6012 315.609 28.9193 328.995 66.1793C337.583 90.084 379.104 119.725 341.16 177.344C329.837 194.54 293.425 214.941 267.079 220.6C209.035 232.875 174.834 205.018 144.494 159.09C121.73 124.349 129.773 74.335 164.299 49.7761C197.018 26.5653 244.576 -9.45146 287.008 2.29488Z" fill="#69A3FF"/>
</svg>
After Width: | Height: | Size: 853 B |
1
frontend/public/Logos/langchain.svg
Normal file
@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>LangChain</title><path d="M8.373 14.502c.013-.06.024-.118.038-.17l.061.145c.115.28.229.557.506.714-.012.254-.334.357-.552.326-.048-.114-.115-.228-.255-.164-.143.056-.3-.01-.266-.185.333-.012.407-.371.468-.666zM18.385 9.245c-.318 0-.616.122-.839.342l-.902.887c-.243.24-.368.572-.343.913l.006.056c.032.262.149.498.337.682.13.128.273.21.447.266a.866.866 0 01-.247.777l-.056.055a2.022 2.022 0 01-1.355-1.555l-.01-.057-.046.037c-.03.024-.06.05-.088.078l-.902.887a1.156 1.156 0 000 1.65c.231.228.535.342.84.342.304 0 .607-.114.838-.341l.902-.888a1.156 1.156 0 00-.436-1.921.953.953 0 01.276-.842 2.062 2.062 0 011.371 1.57l.01.057.047-.037c.03-.024.06-.05.088-.078l.902-.888a1.155 1.155 0 000-1.65 1.188 1.188 0 00-.84-.342z" fill="#1C3C3C"></path><path clip-rule="evenodd" d="M17.901 6H6.1C2.736 6 0 8.692 0 12s2.736 6 6.099 6H17.9C21.264 18 24 15.308 24 12s-2.736-6-6.099-6zm-5.821 9.407c-.195.04-.414.047-.562-.106-.045.1-.136.077-.221.056a.797.797 0 00-.061-.014c-.01.025-.017.048-.026.073-.329.021-.575-.309-.732-.558a4.991 4.991 0 00-.473-.21c-.172-.07-.345-.14-.509-.23a2.218 2.218 0 00-.004.173c-.002.244-.004.503-.227.651-.007.295.236.292.476.29.207-.003.41-.005.447.184a.485.485 0 01-.05.003c-.046 0-.092 0-.127.034-.117.111-.242.063-.372.013-.12-.046-.243-.094-.367-.02a2.318 2.318 0 00-.262.154.97.97 0 01-.548.194c-.024-.036-.014-.059.006-.08a.562.562 0 00.043-.056c.019-.028.035-.057.051-.084.054-.095.103-.18.242-.22-.185-.029-.344.055-.5.137l-.004.002a4.21 4.21 0 01-.065.034c-.097.04-.154.009-.212-.023-.082-.045-.168-.092-.376.04-.04-.032-.02-.061.002-.086.091-.109.21-.125.345-.119-.351-.193-.604-.056-.81.055-.182.098-.327.176-.471-.012-.065.017-.102.063-.138.108-.015.02-.03.038-.047.055-.035-.039-.027-.083-.018-.128l.005-.026a.242.242 0 00.003-.03l-.027-.01c-.053-.022-.105-.044-.09-.124-.117-.04-.2.03-.286.094-.054-.041-.01-.095.032-.145a.279.279 0 00.045-.065c.038-.065.103-.067.166-.069.054-.001.108-.003.145-.042.133-.075.297-.036.462.003.121.028.242.057.354.042.203.025.454-.18.352-.385-.186-.233-.184-.528-.183-.813v-.143c-.016-.108-.172-.233-.328-.358-.12-.095-.24-.191-.298-.28-.16-.177-.285-.382-.409-.585l-.015-.024c-.212-.404-.297-.86-.382-1.315-.103-.546-.205-1.09-.526-1.54-.266.144-.612.075-.841-.118-.12.107-.13.247-.138.396l-.001.014c-.297-.292-.26-.844-.023-1.17.097-.128.213-.233.342-.326.03-.021.04-.042.039-.074.235-1.04 1.836-.839 2.342-.103.167.206.281.442.395.678.137.283.273.566.5.795.22.237.452.463.684.689.359.35.718.699 1.032 1.089.49.587.839 1.276 1.144 1.97.05.092.08.193.11.293.044.15.089.299.2.417.026.035.084.088.149.148.156.143.357.328.289.409.009.019.027.04.05.06.032.028.074.058.116.088.122.087.25.178.16.25zm7.778-3.545l-.902.887c-.24.237-.537.413-.859.51l-.017.005-.006.015A2.021 2.021 0 0117.6 14l-.902.888c-.393.387-.916.6-1.474.6-.557 0-1.08-.213-1.474-.6a2.03 2.03 0 010-2.9l.902-.888c.242-.238.531-.409.859-.508l.016-.004.006-.016c.105-.272.265-.516.475-.724l.902-.887c.393-.387.917-.6 1.474-.6.558 0 1.08.213 1.474.6.394.387.61.902.61 1.45 0 .549-.216 1.064-.61 1.45v.001z" fill="#1C3C3C" fill-rule="evenodd"></path></svg>
After Width: | Height: | Size: 3.1 KiB |