Mirror of https://github.com/SigNoz/signoz.git (synced 2026-03-01 19:42:55 +00:00)

Compare commits: tvats-expo...metric-exp (9 commits)
| SHA1 |
|---|
| 8737030580 |
| c21e463e55 |
| 2091e63195 |
| 95fc905448 |
| 9f5afaf36f |
| 3f69d5cdc2 |
| 98617b5120 |
| df30852296 |
| b9beabb425 |
.github/workflows/jsci.yaml (vendored, 10 lines changed)
@@ -61,3 +61,13 @@ jobs:
    with:
      PRIMUS_REF: main
      JS_SRC: frontend
  md-languages:
    if: |
      (github.event_name == 'pull_request' && ! github.event.pull_request.head.repo.fork && github.event.pull_request.user.login != 'dependabot[bot]' && ! contains(github.event.pull_request.labels.*.name, 'safe-to-test')) ||
      (github.event_name == 'pull_request_target' && contains(github.event.pull_request.labels.*.name, 'safe-to-test'))
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4
      - name: validate md languages
        run: bash frontend/scripts/validate-md-languages.sh
.vscode/settings.json (vendored, 4 lines changed)
@@ -17,7 +17,5 @@
	},
	"[html]": {
		"editor.defaultFormatter": "vscode.html-language-features"
	},
	"python-envs.defaultEnvManager": "ms-python.python:system",
	"python-envs.pythonProjects": []
}
}
@@ -2858,186 +2858,6 @@ paths:
      summary: Update auth domain
      tags:
      - authdomains
  /api/v1/export_raw_data:
    get:
      deprecated: false
      description: This endpoint allows simple query exporting raw data for traces
        and logs
      operationId: HandleExportRawDataGET
      parameters:
      - in: query
        name: format
        schema:
          default: csv
          enum:
          - csv
          - jsonl
          type: string
      - in: query
        name: signal
        required: true
        schema:
          $ref: '#/components/schemas/TelemetrytypesSignal'
      - deprecated: true
        in: query
        name: source
        schema:
          deprecated: true
          type: string
      - in: query
        name: start
        schema:
          minimum: 0
          type: integer
      - in: query
        name: end
        schema:
          minimum: 0
          type: integer
      - in: query
        name: limit
        schema:
          default: 10000
          maximum: 50000
          minimum: 1
          type: integer
      - deprecated: true
        in: query
        name: filter
        schema:
          deprecated: true
          type: string
      - content:
          application/json:
            schema:
              $ref: '#/components/schemas/Querybuildertypesv5Filter'
        in: query
        name: filterExpression
      - deprecated: true
        in: query
        name: columns
        schema:
          deprecated: true
          items:
            type: string
          type: array
      - in: query
        name: selectFields
        schema:
          items:
            $ref: '#/components/schemas/TelemetrytypesTelemetryFieldKey'
          type: array
      - deprecated: true
        in: query
        name: order_by
        schema:
          deprecated: true
          type: string
      - in: query
        name: order
        schema:
          items:
            $ref: '#/components/schemas/Querybuildertypesv5OrderBy'
          type: array
      responses:
        "200":
          content:
            application/json:
              schema:
                type: string
          description: OK
        "400":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Bad Request
        "401":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Unauthorized
        "403":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Forbidden
        "500":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Internal Server Error
      security:
      - api_key:
        - VIEWER
      - tokenizer:
        - VIEWER
      summary: Export raw data
      tags:
      - logs
      - traces
    post:
      deprecated: false
      description: This endpoint allows complex query exporting raw data for traces
        and logs
      operationId: HandleExportRawDataPOST
      parameters:
      - in: query
        name: format
        schema:
          default: csv
          enum:
          - csv
          - jsonl
          type: string
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/Querybuildertypesv5QueryRangeRequest'
      responses:
        "200":
          content:
            application/json:
              schema:
                type: string
          description: OK
        "400":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Bad Request
        "401":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Unauthorized
        "403":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Forbidden
        "500":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Internal Server Error
      security:
      - api_key:
        - VIEWER
      - tokenizer:
        - VIEWER
      summary: Export raw data
      tags:
      - logs
      - traces
  /api/v1/fields/keys:
    get:
      deprecated: false
@@ -5639,6 +5459,10 @@ paths:
        name: searchText
        schema:
          type: string
      - in: query
        name: source
        schema:
          type: string
      responses:
        "200":
          content:
docs/contributing/go/abstractions.md (new file, 127 lines)
@@ -0,0 +1,127 @@
# Abstractions

This document provides rules for deciding when a new type, interface, or intermediate representation is warranted in Go code. The goal is to keep the codebase navigable by ensuring every abstraction earns its place.

## The cost of a new abstraction

Every exported type, interface, or wrapper is a permanent commitment. It must be named, documented, tested, and understood by every future contributor. It creates a new concept in the codebase vocabulary. Before introducing one, verify that the cost is justified by a concrete benefit that cannot be achieved with existing mechanisms.

## Before you introduce anything new

Answer these four questions. If writing a PR, include the answers in the description.

1. **What already exists?** Name the specific type, function, interface, or library that covers this ground today.
2. **What does the new abstraction add?** Name the concrete operation, guarantee, or capability. "Cleaner" or "more reusable" are not sufficient; name what the caller can do that it could not do before.
3. **What does the new abstraction drop?** If it wraps or mirrors an existing structure, list what it cannot represent. Every gap must be either justified or handled with an explicit error.
4. **Who consumes it?** List the call sites. If there is only one producer and one consumer in the same call chain, you likely need a function, not a type.

## Rules

### 1. Prefer functions over types

If a piece of logic has one input and one output, write a function. Do not create a struct to hold intermediate state that is built in one place and read in one place. A function is easier to test, easier to inline, and does not expand the vocabulary of the codebase.

```go
// Prefer this:
func ConvertConfig(src ExternalConfig) (InternalConfig, error)

// Over this:
type ConfigAdapter struct { ... }
func NewConfigAdapter(src ExternalConfig) *ConfigAdapter
func (a *ConfigAdapter) ToInternal() (InternalConfig, error)
```

The two-step version is only justified when `ConfigAdapter` has multiple distinct consumers that use it in different ways.

### 2. Do not duplicate structures you do not own

When a library or external package produces a structured output, operate on that output directly. Do not create a parallel type that mirrors a subset of its fields.

A partial copy will:
- **Silently lose data** when the source has fields or variants the copy does not account for.
- **Drift** when the source evolves and the copy is not updated in lockstep.
- **Add a conversion step** that doubles the code surface and the opportunity for bugs.

If you need to shield consumers from a dependency, define a narrow interface over the dependency's type rather than copying its shape into a new struct.
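A minimal sketch of that narrow-interface approach, using hypothetical names (the `billing` package and the report type it consumes are stand-ins, not real SigNoz code):

```go
package billing

import "fmt"

// ReportSource is the narrow interface: it names only the operations
// this package needs from the external report type, instead of copying
// its fields into a parallel struct that can drift or lose data.
type ReportSource interface {
	Total() int64
	Currency() string
}

// Invoice depends on the narrow interface; the dependency's concrete
// report type satisfies it directly, with no conversion step.
func Invoice(r ReportSource) string {
	return fmt.Sprintf("%d %s", r.Total(), r.Currency())
}
```

Because Go interfaces are satisfied implicitly, the external type needs no changes, and the full structure keeps flowing through the system underneath.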
### 3. Never silently discard input

If your code receives structured input and cannot handle part of it, return an error. Do not silently return nil, skip the element, or produce a partial result. Silent data loss is the hardest class of bug to detect because the code appears to work; it just produces wrong results.

```go
// Wrong: silently ignores the unrecognized case.
default:
	return nil

// Right: makes the gap visible.
default:
	return nil, fmt.Errorf("unsupported %T value: %v", v, v)
```

This applies broadly: type switches, format conversions, data migrations, enum mappings, configuration parsing. Anywhere a `default` or `else` branch can swallow input, it should surface an error instead.

### 4. Do not expose methods that lose information

A method on a structured type should not strip meaning from the structure it belongs to. If a caller needs to iterate over elements for a specific purpose (validation, aggregation, logging), write that logic as a standalone function that operates on the structure with full context, rather than adding a method that returns a reduced view.

```go
// Problematic: callers cannot distinguish how items were related.
func (o *Order) AllLineItems() []LineItem { ... }

// Better: the validation logic operates on the full structure.
func ValidateOrder(o *Order) error { ... }
```

Public methods shape how a type is used. Once a lossy accessor exists, callers will depend on it, and the lost information becomes unrecoverable at those call sites.

### 5. Interfaces should be discovered, not predicted

Do not define an interface before you have at least two concrete implementations that need it. An interface with one implementation is not abstraction; it is indirection that makes it harder to navigate from call site to implementation.

The exception is interfaces required for testing (e.g., for mocking an external dependency). In that case, define the interface in the **consuming** package, not the providing package, following the Go convention of [accepting interfaces and returning structs](https://go.dev/wiki/CodeReviewComments#interfaces).
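As a sketch of that testing exception, with hypothetical names: the consumer declares the small interface it needs, and the real client satisfies it implicitly.

```go
package notifier

import "context"

// mailSender lives here, in the consuming package. The real client
// (defined elsewhere) satisfies it implicitly, and tests can pass a
// fake without the providing package defining any interface at all.
type mailSender interface {
	Send(ctx context.Context, to, body string) error
}

// Notifier accepts the interface and is returned as a concrete struct.
type Notifier struct {
	sender mailSender
}

func New(sender mailSender) *Notifier {
	return &Notifier{sender: sender}
}
```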
### 6. Wrappers must add semantics, not just rename

A wrapper type is justified when it adds meaning, validation, or invariants that the underlying type does not carry. It is not justified when it merely renames fields or reorganizes the same data into a different shape.

```go
// Justified: adds validation that the underlying string does not carry.
type OrgID struct{ value string }
func NewOrgID(s string) (OrgID, error) { /* validates format */ }

// Not justified: renames fields with no new invariant or behavior.
type UserInfo struct {
	Name  string // same as source.Name
	Email string // same as source.Email
}
```

Ask: what does the wrapper guarantee that the underlying type does not? If the answer is nothing, use the underlying type directly.

## When a new type IS warranted

A new type earns its place when it meets **at least one** of these criteria:

- **Serialization boundary**: It must be persisted, sent over the wire, or written to config. The source type is unsuitable (unexported fields, function pointers, cycles).
- **Invariant enforcement**: The constructor or methods enforce constraints that raw data does not carry (e.g., non-empty, validated format, bounded range).
- **Multiple distinct consumers**: Three or more call sites use the type in meaningfully different ways. The type is the shared vocabulary between them.
- **Dependency firewall**: The type lives in a lightweight package so that consumers avoid importing a heavy dependency.
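A minimal sketch of the serialization-boundary case, with hypothetical names: the runtime type is unsuitable for persistence, so a separate storable type carries only what gets written out.

```go
package alertstore

import "encoding/json"

// StorableRule is the persisted shape. A hypothetical runtime Rule
// with unexported fields or callbacks cannot be marshalled directly,
// which is what justifies this second type.
type StorableRule struct {
	ID    string `json:"id"`
	Query string `json:"query"`
}

// Marshal writes the storable form; the runtime type never crosses
// the serialization boundary.
func Marshal(r StorableRule) ([]byte, error) {
	return json.Marshal(r)
}
```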
## What should I remember?

- A function is almost always simpler than a type. Start with a function; promote to a type only when you have evidence of need.
- Never silently drop data. If you cannot handle it, error.
- If your new type mirrors an existing one, you need a strong reason beyond "nicer to work with".
- If your type has one producer and one consumer, it is indirection, not abstraction.
- Interfaces come from need (multiple implementations), not from prediction.
- When in doubt, do not add it. It is easier to add an abstraction later when the need is clear than to remove one after it has spread through the codebase.

## Further reading

These works and our own lessons shaped the guidelines above:

- [The Wrong Abstraction](https://sandimetz.com/blog/2016/1/20/the-wrong-abstraction) - Sandi Metz. The wrong abstraction is worse than duplication. If you find yourself passing parameters and adding conditional paths through shared code, inline it back into every caller and let the duplication show you what the right abstraction is.
- [Write code that is easy to delete, not easy to extend](https://programmingisterrible.com/post/139222674273/write-code-that-is-easy-to-delete-not-easy-to) - tef. Every abstraction is a bet on the future. Optimize for how cheaply you can remove code when the bet is wrong, not for how easily you can extend it when the bet is right.
- [Goodbye, Clean Code](https://overreacted.io/goodbye-clean-code/) - Dan Abramov. A refactoring that removes duplication can look cleaner while making the code harder to change. Clean-looking and easy-to-change are not the same thing.
- [A Philosophy of Software Design](https://www.amazon.com/Philosophy-Software-Design-John-Ousterhout/dp/1732102201) - John Ousterhout. Good abstractions are deep: simple interface, complex implementation. A "false abstraction" omits important details while appearing simple, and is worse than no abstraction at all. ([Summary by Pragmatic Engineer](https://blog.pragmaticengineer.com/a-philosophy-of-software-design-review/))
- [Simplicity is Complicated](https://go.dev/talks/2015/simplicity-is-complicated.slide) - Rob Pike. Go-specific. Fewer orthogonal concepts that compose predictably beat many overlapping ones. Features were left out of Go deliberately; the same discipline applies to your own code.
@@ -123,7 +123,6 @@ if err := router.Handle("/api/v1/things", handler.New(
	Description: "This endpoint creates a thing",
	Request: new(types.PostableThing),
	RequestContentType: "application/json",
	RequestQuery: new(types.QueryableThing),
	Response: new(types.GettableThing),
	ResponseContentType: "application/json",
	SuccessStatusCode: http.StatusCreated,
@@ -156,8 +155,6 @@ The `handler.New` function ties the HTTP handler to OpenAPI metadata via `OpenAP
- **Request / RequestContentType**:
  - `Request` is a Go type that describes the request body or form.
  - `RequestContentType` is usually `"application/json"` or `"application/x-www-form-urlencoded"` (for callbacks like SAML).
- **RequestQuery**:
  - `RequestQuery` is a Go type that describes query URL params.
- **RequestExamples**: An array of `handler.OpenAPIExample` that provide concrete request payloads in the generated spec. See [Adding request examples](#adding-request-examples) below.
- **Response / ResponseContentType**:
  - `Response` is the Go type for the successful response payload.
docs/contributing/go/packages.md (new file, 126 lines)
@@ -0,0 +1,126 @@
# Packages

All shared Go code in SigNoz lives under `pkg/`. Each package represents a distinct domain concept and exposes a clear public interface. This guide covers the conventions for creating, naming, and organising packages so the codebase stays consistent as it grows.

## How should I name a package?

Use short, lowercase, single-word names. No underscores or camelCase (`querier`, `cache`, `authz`, not `query_builder` or `dataStore`).

Names must be **domain-specific**. A package name should tell you what problem domain it deals with, not what data structure it wraps. Prefer `alertmanager` over `manager`, `licensing` over `checker`.

Avoid generic names like `util`, `helpers`, `common`, `misc`, or `base`. If you can't name it, the code probably belongs in an existing package.

## When should I create a new package?

Create a new package when:

- The functionality represents a **distinct domain concept** (e.g., `authz`, `licensing`, `cache`).
- Two or more other packages would import it; it serves as shared infrastructure.
- The code has a clear public interface that can stand on its own.

Do **not** create a new package when:

- There is already a package that covers the same domain. Extend the existing package instead.
- The code is only used in one place. Keep it local to the caller.
- You are splitting purely for file size. Use multiple files within the same package instead.

## How should I lay out a package?

A typical package looks like:

```
pkg/cache/
├── cache.go       # Public interface + exported types
├── config.go      # Configuration types if needed
├── memorycache/   # Implementation sub-package
├── rediscache/    # Another implementation
└── cachetest/     # Test helpers for consumers
```

Follow these rules:

1. **Interface-first file**: The file matching the package name (e.g., `cache.go` in `pkg/cache/`) should define the public interface and core exported types. Keep implementation details out of this file.

2. **One responsibility per file**: Name files after what they contain (`config.go`, `handler.go`, `service.go`), not after the package name. If a package merges two concerns, prefix files to group them (e.g., `memory_store.go`, `redis_store.go` in a storage package).

3. **Sub-packages for implementations**: When a package defines an interface with multiple implementations, put each implementation in its own sub-package (`memorycache/`, `rediscache/`). This keeps the parent package import-free of implementation dependencies.

4. **Test helpers in `{pkg}test/`**: If consumers need test mocks or builders, put them in a `{pkg}test/` sub-package (e.g., `cachetest/`, `sqlstoretest/`). This avoids polluting the main package with test-only code; see the sketch after this list.

5. **Test files stay alongside source**: Unit tests go in `_test.go` files next to the code they test, in the same package.
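A minimal sketch of such a test helper, assuming a hypothetical cache-style interface with `Get`/`Set` methods (the real `cachetest` package may look different):

```go
package cachetest

import "sync"

// Cache is a trivial thread-safe in-memory fake that consumers can
// use in unit tests instead of wiring up a real backend.
type Cache struct {
	mu   sync.Mutex
	data map[string][]byte
}

// New returns an empty fake cache.
func New() *Cache {
	return &Cache{data: make(map[string][]byte)}
}

func (c *Cache) Get(key string) ([]byte, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	v, ok := c.data[key]
	return v, ok
}

func (c *Cache) Set(key string, value []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.data[key] = value
}
```

Because the fake lives in `cachetest/`, production builds of `pkg/cache` never pull in test-only code.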
## How should I name symbols?

### Exported symbols

- **Interfaces**: For single-method interfaces, follow the standard `-er` suffix convention (`Reader`, `Writer`, `Closer`). For multi-method interfaces, use clear nouns (`Cache`, `Store`, `Provider`).
- **Constructors**: `New<Type>(...)` (e.g., `NewMemoryCache()`).
- **Avoid stutter**: Since callers qualify with the package name, don't repeat it. Write `cache.Cache`, not `cache.CacheInterface`. Write `authz.FromRole`, not `authz.AuthzFromRole`.

### Unexported symbols

- Struct receivers: one or two characters (`c`, `f`, `br`).
- Helper functions: descriptive lowercase names (`parseToken`, `buildQuery`).

### Constants

- Use `PascalCase` for exported constants.
- When merging files from different origins into one package, watch out for **name collisions** across files. Prefix to disambiguate when two types share a natural name.

## How should I organise imports?

Group imports in three blocks separated by blank lines:

```go
import (
	// 1. Standard library
	"fmt"
	"net/http"

	// 2. External dependencies
	"github.com/gorilla/mux"

	// 3. Internal
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/types"
)
```

Never introduce circular imports. If package A needs package B and B needs A, extract the shared types into a third package (often under `pkg/types/`).

## Where do shared types go?

Most types belong in `pkg/types/` under a domain-specific sub-package (e.g., `pkg/types/ruletypes`, `pkg/types/authtypes`).

Do not put domain logic in `pkg/types/`; these packages should contain only data structures, constants, and simple methods.
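As an illustration, with hypothetical fields (not the real `ruletypes` contents): data, constants, and a trivial accessor belong in the types package, while evaluation or storage logic belongs in the domain package that consumes it.

```go
package ruletypes

// RuleState is a constant set shared by producers and consumers.
type RuleState string

const (
	RuleStateEnabled  RuleState = "enabled"
	RuleStateDisabled RuleState = "disabled"
)

// Rule is pure data; evaluating or persisting rules lives in the
// domain package, not here.
type Rule struct {
	ID    string
	State RuleState
}

// IsEnabled is a simple method, which is as far as logic in a types
// package should go.
func (r Rule) IsEnabled() bool { return r.State == RuleStateEnabled }
```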
## How do I merge or move packages?

When two packages are tightly coupled (one imports the other's constants, or they cover the same domain), merge them:

1. Pick a domain-specific name for the combined package.
2. Prefix files to preserve origin (e.g., `memory_store.go`, `redis_store.go`).
3. Resolve symbol conflicts explicitly; rename with a prefix rather than silently shadowing.
4. Update all consumers in a single change.
5. Delete the old packages. Do not leave behind re-export shims.
6. Verify with `go build ./...`, `go test ./<new-pkg>/...`, and `go vet ./...`.

## When should I add documentation?

Add a `doc.go` with a package-level comment for any package that is non-trivial or has multiple consumers. Keep it to 1–3 sentences:

```go
// Package cache provides a caching interface with pluggable backends
// for in-memory and Redis-based storage.
package cache
```

## What should I remember?

- Package names are domain-specific and lowercase. Never generic names like `util` or `common`.
- The file matching the package name (e.g., `cache.go`) defines the public interface. Implementation details go elsewhere.
- Never introduce circular imports. Extract shared types into `pkg/types/` when needed.
- Watch for symbol name collisions when merging packages; prefix to disambiguate.
- Put test helpers in a `{pkg}test/` sub-package, not in the main package.
- Before submitting, verify with `go build ./...`, `go test ./<your-pkg>/...`, and `go vet ./...`.
- Update all consumers when you rename or move symbols.
@@ -8,4 +8,15 @@ We adhere to three primary style guides as our foundation:
- [Code Review Comments](https://go.dev/wiki/CodeReviewComments) - For understanding common comments in code reviews
- [Google Style Guide](https://google.github.io/styleguide/go/) - Additional practices from Google

We **recommend** (almost enforce) reviewing these guides before contributing to the codebase. They provide valuable insights into writing idiomatic Go code and will help you understand our approach to backend development. In addition, we have a few additional rules that make certain areas stricter than the above, which can be found in area-specific files in this package.
We **recommend** (almost enforce) reviewing these guides before contributing to the codebase. They provide valuable insights into writing idiomatic Go code and will help you understand our approach to backend development. In addition, we have a few additional rules that make certain areas stricter than the above, which can be found in area-specific files in this package:

- [Abstractions](abstractions.md) - When to introduce new types and intermediate representations
- [Errors](errors.md) - Structured error handling
- [Endpoint](endpoint.md) - HTTP endpoint patterns
- [Flagger](flagger.md) - Feature flag patterns
- [Handler](handler.md) - HTTP handler patterns
- [Integration](integration.md) - Integration testing
- [Provider](provider.md) - Dependency injection and provider patterns
- [Packages](packages.md) - Naming, layout, and conventions for `pkg/` packages
- [Service](service.md) - Managed service lifecycle with `factory.Service`
- [SQL](sql.md) - Database and SQL patterns
docs/contributing/go/service.md (new file, 269 lines)
@@ -0,0 +1,269 @@
# Service

A service is a component with a managed lifecycle: it starts, runs for the lifetime of the application, and stops gracefully.

Services are distinct from [providers](provider.md). A provider adapts an external dependency behind an interface. A service has a managed lifecycle that is tied to the lifetime of the application.

## When do you need a service?

You need a service when your component needs to do work that outlives a single method call:

- **Periodic work**: polling an external system, garbage-collecting expired data, syncing state on an interval.
- **Graceful shutdown**: holding resources (connections, caches, buffers) that must be flushed or closed before the process exits.
- **Blocking on readiness**: waiting for an external dependency to become available before the application can proceed.

If your component only responds to calls and holds no state that requires cleanup, it is a provider, not a service. If it does both (responds to calls *and* needs a lifecycle), embed `factory.Service` in the provider interface; see [How to create a service](#how-to-create-a-service).

## The interface

The `factory.Service` interface in `pkg/factory/service.go` defines two methods:

```go
type Service interface {
	// Starts a service. It should block and should not return until the service is stopped or it fails.
	Start(context.Context) error
	// Stops a service.
	Stop(context.Context) error
}
```

`Start` **must block**. It should not return until the service is stopped (returning `nil`) or something goes wrong (returning an error). If `Start` returns an error, the entire application shuts down.

`Stop` should cause `Start` to unblock and return. It must be safe to call from a different goroutine than the one running `Start`.

## Shutdown coordination

Every service uses a `stopC chan struct{}` to coordinate shutdown:

- **Constructor**: `stopC: make(chan struct{})`
- **Start**: blocks on `<-stopC` (or uses it in a `select` loop)
- **Stop**: `close(stopC)` to unblock `Start`

This is the standard pattern. Do not use `context.WithCancel` or other mechanisms for service-level shutdown coordination. A minimal skeleton follows; the next section shows the pattern in real services.
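In skeleton form (a minimal sketch with a hypothetical service struct; the concrete examples below follow the same shape):

```go
type service struct {
	stopC chan struct{}
}

func newService() *service {
	// Constructor allocates the channel.
	return &service{stopC: make(chan struct{})}
}

func (s *service) Start(ctx context.Context) error {
	<-s.stopC // block until Stop closes the channel
	return nil
}

func (s *service) Stop(ctx context.Context) error {
	close(s.stopC) // unblocks Start
	return nil
}
```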
## Service shapes

Two shapes recur across the codebase, implemented by convention rather than base classes. They are not exhaustive; if a new shape is needed, bring it up for discussion before going ahead with the implementation.

### Idle service

The service does work during startup or shutdown but has nothing to do while running. `Start` blocks on `<-stopC`. `Stop` closes `stopC` and optionally does cleanup.

The JWT tokenizer (`pkg/tokenizer/jwttokenizer/provider.go`) is a good example. It validates and creates tokens on demand via method calls, but has no periodic work to do. It still needs the service lifecycle so the registry can manage its lifetime:

```go
// pkg/tokenizer/jwttokenizer/provider.go

func (provider *provider) Start(ctx context.Context) error {
	<-provider.stopC
	return nil
}

func (provider *provider) Stop(ctx context.Context) error {
	close(provider.stopC)
	return nil
}
```

The instrumentation SDK (`pkg/instrumentation/sdk.go`) is idle while running but does real cleanup in `Stop`, shutting down its OpenTelemetry tracer and meter providers:

```go
// pkg/instrumentation/sdk.go

func (i *SDK) Start(ctx context.Context) error {
	<-i.startCh
	return nil
}

func (i *SDK) Stop(ctx context.Context) error {
	close(i.startCh)
	return errors.Join(
		i.sdk.Shutdown(ctx),
		i.meterProviderShutdownFunc(ctx),
	)
}
```

### Scheduled service

The service runs an operation repeatedly on a fixed interval. `Start` runs a ticker loop with a `select` on `stopC` and the ticker channel.

The opaque tokenizer (`pkg/tokenizer/opaquetokenizer/provider.go`) garbage-collects expired tokens and flushes cached last-observed-at timestamps to the database on a configurable interval:

```go
// pkg/tokenizer/opaquetokenizer/provider.go

func (provider *provider) Start(ctx context.Context) error {
	ticker := time.NewTicker(provider.config.Opaque.GC.Interval)
	defer ticker.Stop()

	for {
		select {
		case <-provider.stopC:
			return nil
		case <-ticker.C:
			orgs, err := provider.orgGetter.ListByOwnedKeyRange(ctx)
			if err != nil {
				provider.settings.Logger().ErrorContext(ctx, "failed to get orgs data", "error", err)
				continue
			}

			for _, org := range orgs {
				if err := provider.gc(ctx, org); err != nil {
					provider.settings.Logger().ErrorContext(ctx, "failed to garbage collect tokens", "error", err, "org_id", org.ID)
				}

				if err := provider.flushLastObservedAt(ctx, org); err != nil {
					provider.settings.Logger().ErrorContext(ctx, "failed to flush tokens", "error", err, "org_id", org.ID)
				}
			}
		}
	}
}
```

Its `Stop` does a final GC and flush before returning, so no data is lost on shutdown:

```go
// pkg/tokenizer/opaquetokenizer/provider.go

func (provider *provider) Stop(ctx context.Context) error {
	close(provider.stopC)

	orgs, err := provider.orgGetter.ListByOwnedKeyRange(ctx)
	if err != nil {
		return err
	}

	for _, org := range orgs {
		if err := provider.gc(ctx, org); err != nil {
			provider.settings.Logger().ErrorContext(ctx, "failed to garbage collect tokens", "error", err, "org_id", org.ID)
		}

		if err := provider.flushLastObservedAt(ctx, org); err != nil {
			provider.settings.Logger().ErrorContext(ctx, "failed to flush tokens", "error", err, "org_id", org.ID)
		}
	}

	return nil
}
```

The key points:

- In the loop, `select` on `stopC` and the ticker. Errors in iterations are logged but do not cause the service to return (which would shut down the application).
- Only return an error from `Start` if the failure is unrecoverable.
- Use `Stop` to flush or drain any in-memory state before the process exits.

## How to create a service

There are two cases: a standalone service and a provider that is also a service.

### Standalone service

A standalone service only has the `factory.Service` lifecycle, i.e. it does not serve as a dependency for other packages. The user reconciliation service is an example.

1. Define the service interface in your package. Embed `factory.Service`:

```go
// pkg/modules/user/service.go
package user

type Service interface {
	factory.Service
}
```

2. Create the implementation in an `impl` sub-package. Use an unexported struct with an exported constructor that returns the interface:

```go
// pkg/modules/user/impluser/service.go
package impluser

type service struct {
	settings factory.ScopedProviderSettings
	// ... dependencies ...
	stopC chan struct{}
}

func NewService(
	providerSettings factory.ProviderSettings,
	// ... dependencies ...
) user.Service {
	return &service{
		settings: factory.NewScopedProviderSettings(providerSettings, "go.signoz.io/pkg/modules/user"),
		// ... dependencies ...
		stopC: make(chan struct{}),
	}
}

func (s *service) Start(ctx context.Context) error { ... }
func (s *service) Stop(ctx context.Context) error { ... }
```

### Provider that is also a service

Many providers need a managed lifecycle: they poll, sync, or garbage-collect in the background. In this case, embed `factory.Service` in the provider interface. The implementation satisfies both the provider methods and `Start`/`Stop`.

```go
// pkg/tokenizer/tokenizer.go
package tokenizer

type Tokenizer interface {
	factory.Service
	CreateToken(context.Context, *authtypes.Identity, map[string]string) (*authtypes.Token, error)
	GetIdentity(context.Context, string) (*authtypes.Identity, error)
	// ... other methods ...
}
```

The implementation (e.g. `pkg/tokenizer/opaquetokenizer/provider.go`) implements `Start`, `Stop`, and all the provider methods on the same struct. See the [provider guide](provider.md) for how to set up the factory, config, and constructor. The `stopC` channel and `Start`/`Stop` methods follow the same patterns described above.

## How to wire it up

Wiring happens in `pkg/signoz/signoz.go`.

### 1. Instantiate the service

For a standalone service, call the constructor directly:

```go
userService := impluser.NewService(providerSettings, store, module, orgGetter, authz, config.User.Root)
```

For a provider that is also a service, use `factory.NewProviderFromNamedMap` as described in the [provider guide](provider.md). The returned value already implements `factory.Service`.

### 2. Register in the registry

Wrap the service with `factory.NewNamedService` and pass it to `factory.NewRegistry`:

```go
registry, err := factory.NewRegistry(
	instrumentation.Logger(),
	// ... other services ...
	factory.NewNamedService(factory.MustNewName("user"), userService),
)
```

The name must be unique across all services. The registry handles the rest:

- **Start**: launches all services concurrently in goroutines.
- **Wait**: blocks until a service returns an error, the context is cancelled, or a SIGINT/SIGTERM is received. Any service error triggers application shutdown.
- **Stop**: stops all services concurrently, collects errors via `errors.Join`.

You do not call `Start` or `Stop` on individual services. The registry does it.

## What should I remember?

- A service has a managed lifecycle: `Start` blocks, `Stop` unblocks it.
- Use `stopC chan struct{}` for shutdown coordination. `close(stopC)` in `Stop`, `<-stopC` in `Start`.
- Service shapes: idle (block on `stopC`) and scheduled (ticker loop with `select`).
- Unexported struct, exported `NewService` constructor returning the interface.
- First constructor parameter is `factory.ProviderSettings`. Create scoped settings with `factory.NewScopedProviderSettings`.
- Register in `factory.Registry` with `factory.NewNamedService`. The registry starts and stops everything.
- Only return an error from `Start` if the failure is unrecoverable. Log and continue for transient errors in polling loops.

## Further reading

- [Google Guava - ServiceExplained](https://github.com/google/guava/wiki/ServiceExplained) - The service lifecycle pattern takes inspiration from this.
- [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) - Worth studying for its approach to building composable components.
@@ -19,6 +19,8 @@ const config: Config.InitialOptions = {
	'^.*/useSafeNavigate$': USE_SAFE_NAVIGATE_MOCK_PATH,
	'^@signozhq/icons$':
		'<rootDir>/node_modules/@signozhq/icons/dist/index.esm.js',
	'^react-syntax-highlighter/dist/esm/(.*)$':
		'<rootDir>/node_modules/react-syntax-highlighter/dist/cjs/$1',
},
globals: {
	extensionsToTreatAsEsm: ['.ts'],
@@ -214,7 +214,7 @@
	"@types/react-redux": "^7.1.11",
	"@types/react-resizable": "3.0.3",
	"@types/react-router-dom": "^5.1.6",
	"@types/react-syntax-highlighter": "15.5.7",
	"@types/react-syntax-highlighter": "15.5.13",
	"@types/redux-mock-store": "1.0.4",
	"@types/styled-components": "^5.1.4",
	"@types/uuid": "^8.3.1",
frontend/scripts/extract-md-languages.sh (new executable file, 13 lines)
@@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Extracts unique fenced code block language identifiers from all .md files under frontend/src/
# Usage: bash frontend/scripts/extract-md-languages.sh

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SRC_DIR="$SCRIPT_DIR/../src"

grep -roh '```[a-zA-Z0-9_+-]*' "$SRC_DIR" --include='*.md' \
	| sed 's/^```//' \
	| grep -v '^$' \
	| sort -u
frontend/scripts/validate-md-languages.sh (new executable file, 35 lines)
@@ -0,0 +1,35 @@
#!/usr/bin/env bash
# Validates that all fenced code block languages used in .md files are registered
# in the syntax highlighter.
# Usage: bash frontend/scripts/validate-md-languages.sh

set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
SYNTAX_HIGHLIGHTER="$SCRIPT_DIR/../src/components/MarkdownRenderer/syntaxHighlighter.ts"

# Get all languages used in .md files
md_languages=$("$SCRIPT_DIR/extract-md-languages.sh")

# Get all registered languages from syntaxHighlighter.ts
registered_languages=$(grep -oP "registerLanguage\('\K[^']+" "$SYNTAX_HIGHLIGHTER" | sort -u)

missing_languages=()

for lang in $md_languages; do
	if ! echo "$registered_languages" | grep -qx "$lang"; then
		missing_languages+=("$lang")
	fi
done

if [ ${#missing_languages[@]} -gt 0 ]; then
	echo "Error: The following languages are used in .md files but not registered in syntaxHighlighter.ts:"
	for lang in "${missing_languages[@]}"; do
		echo "  - $lang"
	done
	echo ""
	echo "Please add them to: frontend/src/components/MarkdownRenderer/syntaxHighlighter.ts"
	exit 1
fi

echo "All markdown code block languages are registered in syntaxHighlighter.ts"
@@ -20,11 +20,8 @@ import { useMutation, useQuery } from 'react-query';
import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type {
	HandleExportRawDataGETParams,
	HandleExportRawDataPOSTParams,
	ListPromotedAndIndexedPaths200,
	PromotetypesPromotePathDTO,
	Querybuildertypesv5QueryRangeRequestDTO,
	RenderErrorResponseDTO,
} from '../sigNoz.schemas';
@@ -32,206 +29,6 @@ type AwaitedInput<T> = PromiseLike<T> | T;

type Awaited<O> = O extends AwaitedInput<infer T> ? T : never;

/**
 * This endpoint allows simple query exporting raw data for traces and logs
 * @summary Export raw data
 */
export const handleExportRawDataGET = (
	params?: HandleExportRawDataGETParams,
	signal?: AbortSignal,
) => {
	return GeneratedAPIInstance<string>({
		url: `/api/v1/export_raw_data`,
		method: 'GET',
		params,
		signal,
	});
};

export const getHandleExportRawDataGETQueryKey = (
	params?: HandleExportRawDataGETParams,
) => {
	return [`/api/v1/export_raw_data`, ...(params ? [params] : [])] as const;
};

export const getHandleExportRawDataGETQueryOptions = <
	TData = Awaited<ReturnType<typeof handleExportRawDataGET>>,
	TError = ErrorType<RenderErrorResponseDTO>
>(
	params?: HandleExportRawDataGETParams,
	options?: {
		query?: UseQueryOptions<
			Awaited<ReturnType<typeof handleExportRawDataGET>>,
			TError,
			TData
		>;
	},
) => {
	const { query: queryOptions } = options ?? {};

	const queryKey =
		queryOptions?.queryKey ?? getHandleExportRawDataGETQueryKey(params);

	const queryFn: QueryFunction<
		Awaited<ReturnType<typeof handleExportRawDataGET>>
	> = ({ signal }) => handleExportRawDataGET(params, signal);

	return { queryKey, queryFn, ...queryOptions } as UseQueryOptions<
		Awaited<ReturnType<typeof handleExportRawDataGET>>,
		TError,
		TData
	> & { queryKey: QueryKey };
};

export type HandleExportRawDataGETQueryResult = NonNullable<
	Awaited<ReturnType<typeof handleExportRawDataGET>>
>;
export type HandleExportRawDataGETQueryError = ErrorType<RenderErrorResponseDTO>;

/**
 * @summary Export raw data
 */

export function useHandleExportRawDataGET<
	TData = Awaited<ReturnType<typeof handleExportRawDataGET>>,
	TError = ErrorType<RenderErrorResponseDTO>
>(
	params?: HandleExportRawDataGETParams,
	options?: {
		query?: UseQueryOptions<
			Awaited<ReturnType<typeof handleExportRawDataGET>>,
			TError,
			TData
		>;
	},
): UseQueryResult<TData, TError> & { queryKey: QueryKey } {
	const queryOptions = getHandleExportRawDataGETQueryOptions(params, options);

	const query = useQuery(queryOptions) as UseQueryResult<TData, TError> & {
		queryKey: QueryKey;
	};

	query.queryKey = queryOptions.queryKey;

	return query;
}

/**
 * @summary Export raw data
 */
export const invalidateHandleExportRawDataGET = async (
	queryClient: QueryClient,
	params?: HandleExportRawDataGETParams,
	options?: InvalidateOptions,
): Promise<QueryClient> => {
	await queryClient.invalidateQueries(
		{ queryKey: getHandleExportRawDataGETQueryKey(params) },
		options,
	);

	return queryClient;
};

/**
 * This endpoint allows complex query exporting raw data for traces and logs
 * @summary Export raw data
 */
export const handleExportRawDataPOST = (
	querybuildertypesv5QueryRangeRequestDTO: BodyType<Querybuildertypesv5QueryRangeRequestDTO>,
	params?: HandleExportRawDataPOSTParams,
	signal?: AbortSignal,
) => {
	return GeneratedAPIInstance<string>({
		url: `/api/v1/export_raw_data`,
		method: 'POST',
		headers: { 'Content-Type': 'application/json' },
		data: querybuildertypesv5QueryRangeRequestDTO,
		params,
		signal,
	});
};

export const getHandleExportRawDataPOSTMutationOptions = <
	TError = ErrorType<RenderErrorResponseDTO>,
	TContext = unknown
>(options?: {
	mutation?: UseMutationOptions<
		Awaited<ReturnType<typeof handleExportRawDataPOST>>,
		TError,
		{
			data: BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
			params?: HandleExportRawDataPOSTParams;
		},
		TContext
	>;
}): UseMutationOptions<
	Awaited<ReturnType<typeof handleExportRawDataPOST>>,
	TError,
	{
		data: BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
		params?: HandleExportRawDataPOSTParams;
	},
	TContext
> => {
	const mutationKey = ['handleExportRawDataPOST'];
	const { mutation: mutationOptions } = options
		? options.mutation &&
		  'mutationKey' in options.mutation &&
		  options.mutation.mutationKey
			? options
			: { ...options, mutation: { ...options.mutation, mutationKey } }
		: { mutation: { mutationKey } };

	const mutationFn: MutationFunction<
		Awaited<ReturnType<typeof handleExportRawDataPOST>>,
		{
			data: BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
			params?: HandleExportRawDataPOSTParams;
		}
	> = (props) => {
		const { data, params } = props ?? {};

		return handleExportRawDataPOST(data, params);
	};

	return { mutationFn, ...mutationOptions };
};

export type HandleExportRawDataPOSTMutationResult = NonNullable<
	Awaited<ReturnType<typeof handleExportRawDataPOST>>
>;
export type HandleExportRawDataPOSTMutationBody = BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
export type HandleExportRawDataPOSTMutationError = ErrorType<RenderErrorResponseDTO>;

/**
 * @summary Export raw data
 */
export const useHandleExportRawDataPOST = <
	TError = ErrorType<RenderErrorResponseDTO>,
	TContext = unknown
>(options?: {
	mutation?: UseMutationOptions<
		Awaited<ReturnType<typeof handleExportRawDataPOST>>,
		TError,
		{
			data: BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
			params?: HandleExportRawDataPOSTParams;
		},
		TContext
	>;
}): UseMutationResult<
	Awaited<ReturnType<typeof handleExportRawDataPOST>>,
	TError,
	{
		data: BodyType<Querybuildertypesv5QueryRangeRequestDTO>;
		params?: HandleExportRawDataPOSTParams;
	},
	TContext
> => {
	const mutationOptions = getHandleExportRawDataPOSTMutationOptions(options);

	return useMutation(mutationOptions);
};
/**
 * This endpoint promotes and indexes paths
 * @summary Promote and index paths
@@ -2732,76 +2732,6 @@ export type DeleteAuthDomainPathParameters = {
export type UpdateAuthDomainPathParameters = {
	id: string;
};
export type HandleExportRawDataGETParams = {
	/**
	 * @enum csv,jsonl
	 * @type string
	 * @description undefined
	 */
	format?: HandleExportRawDataGETFormat;
	/**
	 * @enum logs,traces
	 * @type string
	 * @description undefined
	 */
	source?: HandleExportRawDataGETSource;
	/**
	 * @type integer
	 * @minimum 0
	 * @description undefined
	 */
	start?: number;
	/**
	 * @type integer
	 * @minimum 0
	 * @description undefined
	 */
	end?: number;
	/**
	 * @type integer
	 * @maximum 50000
	 * @minimum 1
	 * @description undefined
	 */
	limit?: number;
	/**
	 * @type string
	 * @description undefined
	 */
	filter?: string;
	/**
	 * @type array
	 * @description undefined
	 */
	columns?: string[];
	/**
	 * @type string
	 * @description undefined
	 */
	order_by?: string;
};

export enum HandleExportRawDataGETFormat {
	csv = 'csv',
	jsonl = 'jsonl',
}
export enum HandleExportRawDataGETSource {
	logs = 'logs',
	traces = 'traces',
}
export type HandleExportRawDataPOSTParams = {
	/**
	 * @enum csv,jsonl
	 * @type string
	 * @description undefined
	 */
	format?: HandleExportRawDataPOSTFormat;
};

export enum HandleExportRawDataPOSTFormat {
	csv = 'csv',
	jsonl = 'jsonl',
}
export type GetFieldsKeysParams = {
	/**
	 * @description undefined
@@ -3301,6 +3231,11 @@ export type ListMetricsParams = {
	 * @description undefined
	 */
	searchText?: string;
	/**
	 * @type string
	 * @description undefined
	 */
	source?: string;
};

export type ListMetrics200 = {
@@ -2,13 +2,12 @@

import ReactMarkdown from 'react-markdown';
import { CodeProps } from 'react-markdown/lib/ast-to-react';
import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter';
import { a11yDark } from 'react-syntax-highlighter/dist/cjs/styles/prism';
import logEvent from 'api/common/logEvent';
import { isEmpty } from 'lodash-es';
import rehypeRaw from 'rehype-raw';

import CodeCopyBtn from './CodeCopyBtn/CodeCopyBtn';
import SyntaxHighlighter, { a11yDark } from './syntaxHighlighter';

interface LinkProps {
	href: string;
@@ -0,0 +1,34 @@
import { PrismLight as SyntaxHighlighter } from 'react-syntax-highlighter';
import bash from 'react-syntax-highlighter/dist/esm/languages/prism/bash';
import docker from 'react-syntax-highlighter/dist/esm/languages/prism/docker';
import elixir from 'react-syntax-highlighter/dist/esm/languages/prism/elixir';
import go from 'react-syntax-highlighter/dist/esm/languages/prism/go';
import javascript from 'react-syntax-highlighter/dist/esm/languages/prism/javascript';
import json from 'react-syntax-highlighter/dist/esm/languages/prism/json';
import jsx from 'react-syntax-highlighter/dist/esm/languages/prism/jsx';
import rust from 'react-syntax-highlighter/dist/esm/languages/prism/rust';
import swift from 'react-syntax-highlighter/dist/esm/languages/prism/swift';
import tsx from 'react-syntax-highlighter/dist/esm/languages/prism/tsx';
import typescript from 'react-syntax-highlighter/dist/esm/languages/prism/typescript';
import yaml from 'react-syntax-highlighter/dist/esm/languages/prism/yaml';
import a11yDark from 'react-syntax-highlighter/dist/esm/styles/prism/a11y-dark';

SyntaxHighlighter.registerLanguage('bash', bash);
SyntaxHighlighter.registerLanguage('docker', docker);
SyntaxHighlighter.registerLanguage('dockerfile', docker);
SyntaxHighlighter.registerLanguage('elixir', elixir);
SyntaxHighlighter.registerLanguage('go', go);
SyntaxHighlighter.registerLanguage('javascript', javascript);
SyntaxHighlighter.registerLanguage('js', javascript);
SyntaxHighlighter.registerLanguage('json', json);
SyntaxHighlighter.registerLanguage('jsx', jsx);
SyntaxHighlighter.registerLanguage('rust', rust);
SyntaxHighlighter.registerLanguage('swift', swift);
SyntaxHighlighter.registerLanguage('ts', typescript);
SyntaxHighlighter.registerLanguage('tsx', tsx);
SyntaxHighlighter.registerLanguage('typescript', typescript);
SyntaxHighlighter.registerLanguage('yaml', yaml);
SyntaxHighlighter.registerLanguage('yml', yaml);

export default SyntaxHighlighter;
export { a11yDark };
@@ -60,11 +60,30 @@
	gap: 8px;

	margin-left: 108px;
	position: relative;

	/* Vertical dashed line connecting query elements */
	&::after {
		content: '';
		position: absolute;
		left: -28px;
		top: 0;
		bottom: 0;
		width: 1px;
		background: repeating-linear-gradient(
			to bottom,
			#1d212d,
			#1d212d 4px,
			transparent 4px,
			transparent 8px
		);
	}

	.code-mirror-where-clause,
	.query-aggregation-container,
	.query-add-ons,
	.metrics-aggregation-section-content {
	.metrics-aggregation-section-content,
	.metrics-container {
		position: relative;

		&::before {
@@ -102,6 +121,10 @@
	.qb-elements-container {
		margin-left: 0px;

		&::after {
			display: none;
		}

		.code-mirror-where-clause,
		.query-aggregation-container,
		.query-add-ons,
@@ -333,28 +356,7 @@
	text-transform: uppercase;

	&::before {
		content: '';
		height: 120px;
		content: '';
		position: absolute;
		left: 0;
		top: 31px;
		bottom: 0;
		width: 1px;
		background: repeating-linear-gradient(
			to bottom,
			#1d212d,
			#1d212d 4px,
			transparent 4px,
			transparent 8px
		);
		left: 15px;
	}

	&.has-trace-operator {
		&::before {
			height: 0px;
		}
		display: none;
	}
}
@@ -462,10 +464,21 @@

.qb-content-section {
	.qb-elements-container {
		&::after {
			background: repeating-linear-gradient(
				to bottom,
				var(--bg-vanilla-300),
				var(--bg-vanilla-300) 4px,
				transparent 4px,
				transparent 8px
			);
		}

		.code-mirror-where-clause,
		.query-aggregation-container,
		.query-add-ons,
		.metrics-aggregation-section-content {
		.metrics-aggregation-section-content,
		.metrics-container {
			&::before {
				border-left: 6px dotted var(--bg-vanilla-300);
			}
@@ -529,18 +542,6 @@

.qb-entity-options {
	.options {
		.query-name {
			&::before {
				background: repeating-linear-gradient(
					to bottom,
					var(--bg-vanilla-300),
					var(--bg-vanilla-300) 4px,
					transparent 4px,
					transparent 8px
				);
			}
		}

		.formula-name {
			&::before {
				background: repeating-linear-gradient(
@@ -207,6 +207,7 @@ export const QueryBuilderV2 = memo(function QueryBuilderV2({
	queryVariant={config?.queryVariant || 'dropdown'}
	showOnlyWhereClause={showOnlyWhereClause}
	isListViewPanel={isListViewPanel}
	signalSource={currentQuery.builder.queryData[0].source as 'meter' | ''}
	onSignalSourceChange={onSignalSourceChange || ((): void => {})}
	signalSourceChangeEnabled={signalSourceChangeEnabled}
	queriesCount={1}
@@ -1,14 +1,13 @@
import { memo, useCallback, useMemo, useState } from 'react';
import { memo, useCallback, useMemo } from 'react';
import { Select } from 'antd';
import {
	initialQueriesMap,
	initialQueryMeterWithType,
	PANEL_TYPES,
} from 'constants/queryBuilder';
import { AggregatorFilter } from 'container/QueryBuilder/filters';
import { MetricNameSelector } from 'container/QueryBuilder/filters';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
import { DataSource } from 'types/common/queryBuilder';
import { SelectOption } from 'types/common/select';
@@ -44,21 +43,12 @@ export const MetricsSelect = memo(function MetricsSelect({
	signalSourceChangeEnabled: boolean;
	savePreviousQuery: boolean;
}): JSX.Element {
	const [attributeKeys, setAttributeKeys] = useState<BaseAutocompleteData[]>([]);

	const { handleChangeAggregatorAttribute } = useQueryOperations({
		index,
		query,
		entityVersion: version,
	});

	const handleAggregatorAttributeChange = useCallback(
		(value: BaseAutocompleteData, isEditMode?: boolean) => {
			handleChangeAggregatorAttribute(value, isEditMode, attributeKeys || []);
		},
		[handleChangeAggregatorAttribute, attributeKeys],
	);

	const {
		updateAllQueriesOperators,
		handleSetQueryData,
@@ -164,12 +154,10 @@
			/>
		)}

		<AggregatorFilter
			onChange={handleAggregatorAttributeChange}
		<MetricNameSelector
			onChange={handleChangeAggregatorAttribute}
			query={query}
			index={index}
			signalSource={signalSource || ''}
			setAttributeKeys={setAttributeKeys}
		/>
	</div>
);
@@ -202,8 +202,8 @@ function QueryAddOns({
|
||||
} else {
|
||||
filteredAddOns = Object.values(ADD_ONS);
|
||||
|
||||
// Filter out group_by for metrics data source
|
||||
if (query.dataSource === DataSource.METRICS) {
|
||||
// Filter out group_by for metrics data source (handled in MetricsAggregateSection)
|
||||
filteredAddOns = filteredAddOns.filter(
|
||||
(addOn) => addOn.key !== ADD_ONS_KEYS.GROUP_BY,
|
||||
);
|
||||
|
||||
@@ -43,6 +43,7 @@ jest.mock(
|
||||
);
|
||||
jest.mock('container/QueryBuilder/filters', () => ({
|
||||
AggregatorFilter: (): JSX.Element => <div />,
|
||||
MetricNameSelector: (): JSX.Element => <div />,
|
||||
}));
|
||||
// Mock hooks
|
||||
jest.mock('hooks/queryBuilder/useQueryBuilder');
|
||||
|
||||
@@ -177,7 +177,7 @@ export const initialQueryBuilderFormValues: IBuilderQuery = {
|
||||
{
|
||||
metricName: '',
|
||||
temporality: '',
|
||||
timeAggregation: MetricAggregateOperator.COUNT,
|
||||
timeAggregation: MetricAggregateOperator.AVG,
|
||||
spaceAggregation: MetricAggregateOperator.SUM,
|
||||
reduceTo: ReduceOperators.AVG,
|
||||
},
|
||||
@@ -225,7 +225,7 @@ export const initialQueryBuilderFormMeterValues: IBuilderQuery = {
|
||||
{
|
||||
metricName: '',
|
||||
temporality: '',
|
||||
timeAggregation: MeterAggregateOperator.COUNT,
|
||||
timeAggregation: MeterAggregateOperator.AVG,
|
||||
spaceAggregation: MeterAggregateOperator.SUM,
|
||||
reduceTo: ReduceOperators.AVG,
|
||||
},
|
||||
|
||||
@@ -441,7 +441,7 @@ describe('Footer utils', () => {
|
||||
reduceTo: undefined,
|
||||
spaceAggregation: 'sum',
|
||||
temporality: undefined,
|
||||
timeAggregation: 'count',
|
||||
timeAggregation: 'avg',
|
||||
},
|
||||
],
|
||||
disabled: false,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import { useState } from 'react';
|
||||
import { CloudDownloadOutlined } from '@ant-design/icons';
|
||||
import { Button, Dropdown, MenuProps } from 'antd';
|
||||
import { Excel } from 'antd-table-saveas-excel';
|
||||
import { unparse } from 'papaparse';
|
||||
|
||||
import { DownloadProps } from './Download.types';
|
||||
@@ -8,25 +8,36 @@ import { DownloadProps } from './Download.types';
|
||||
import './Download.styles.scss';
|
||||
|
||||
function Download({ data, isLoading, fileName }: DownloadProps): JSX.Element {
|
||||
const downloadExcelFile = (): void => {
|
||||
const headers = Object.keys(Object.assign({}, ...data)).map((item) => {
|
||||
const updatedTitle = item
|
||||
.split('_')
|
||||
.map((word) => word.charAt(0).toUpperCase() + word.slice(1))
|
||||
.join(' ');
|
||||
return {
|
||||
title: updatedTitle,
|
||||
dataIndex: item,
|
||||
};
|
||||
});
|
||||
const excel = new Excel();
|
||||
excel
|
||||
.addSheet(fileName)
|
||||
.addColumns(headers)
|
||||
.addDataSource(data, {
|
||||
str2Percent: true,
|
||||
})
|
||||
.saveAs(`${fileName}.xlsx`);
|
||||
const [isDownloading, setIsDownloading] = useState(false);
|
||||
|
||||
const downloadExcelFile = async (): Promise<void> => {
|
||||
setIsDownloading(true);
|
||||
|
||||
try {
|
||||
const headers = Object.keys(Object.assign({}, ...data)).map((item) => {
|
||||
const updatedTitle = item
|
||||
.split('_')
|
||||
.map((word) => word.charAt(0).toUpperCase() + word.slice(1))
|
||||
.join(' ');
|
||||
return {
|
||||
title: updatedTitle,
|
||||
dataIndex: item,
|
||||
};
|
||||
});
|
||||
|
||||
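// Load the Excel helper on demand so it stays out of the initial bundle
// and is only fetched when the user actually triggers an export.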
const excelLib = await import('antd-table-saveas-excel');

const excel = new excelLib.Excel();
excel
.addSheet(fileName)
.addColumns(headers)
.addDataSource(data, {
str2Percent: true,
})
.saveAs(`${fileName}.xlsx`);
} finally {
setIsDownloading(false);
}
};

const downloadCsvFile = (): void => {
@@ -59,7 +70,7 @@ function Download({ data, isLoading, fileName }: DownloadProps): JSX.Element {
<Dropdown menu={menu} trigger={['click']}>
<Button
className="download-button"
loading={isLoading}
loading={isLoading || isDownloading}
size="small"
type="link"
>

@@ -1,5 +1,5 @@
import { useState } from 'react';
import { Button, Popover, Typography } from 'antd';
import { Excel } from 'antd-table-saveas-excel';
import { FileDigit, FileDown, Sheet } from 'lucide-react';
import { unparse } from 'papaparse';

@@ -8,25 +8,34 @@ import { DownloadProps } from './DownloadV2.types';
import './DownloadV2.styles.scss';

function Download({ data, isLoading, fileName }: DownloadProps): JSX.Element {
const downloadExcelFile = (): void => {
const headers = Object.keys(Object.assign({}, ...data)).map((item) => {
const updatedTitle = item
.split('_')
.map((word) => word.charAt(0).toUpperCase() + word.slice(1))
.join(' ');
return {
title: updatedTitle,
dataIndex: item,
};
});
const excel = new Excel();
excel
.addSheet(fileName)
.addColumns(headers)
.addDataSource(data, {
str2Percent: true,
})
.saveAs(`${fileName}.xlsx`);
const [isDownloading, setIsDownloading] = useState(false);

const downloadExcelFile = async (): Promise<void> => {
setIsDownloading(true);

try {
const headers = Object.keys(Object.assign({}, ...data)).map((item) => {
const updatedTitle = item
.split('_')
.map((word) => word.charAt(0).toUpperCase() + word.slice(1))
.join(' ');
return {
title: updatedTitle,
dataIndex: item,
};
});
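// Same on-demand import as in Download: the Excel helper is fetched only at export time.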
const excelLib = await import('antd-table-saveas-excel');
const excel = new excelLib.Excel();
excel
.addSheet(fileName)
.addColumns(headers)
.addDataSource(data, {
str2Percent: true,
})
.saveAs(`${fileName}.xlsx`);
} finally {
setIsDownloading(false);
}
};

const downloadCsvFile = (): void => {
@@ -54,6 +63,7 @@ function Download({ data, isLoading, fileName }: DownloadProps): JSX.Element {
type="text"
onClick={downloadExcelFile}
className="action-btns"
loading={isDownloading}
>
Excel (.xlsx)
</Button>

@@ -6,6 +6,7 @@ import { isAxiosError } from 'axios';
import { ENTITY_VERSION_V5 } from 'constants/app';
import { initialQueryMeterWithType, PANEL_TYPES } from 'constants/queryBuilder';
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
import EmptyMetricsSearch from 'container/MetricsExplorer/Explorer/EmptyMetricsSearch';
import { BuilderUnitsFilter } from 'container/QueryBuilder/filters/BuilderUnitsFilter';
import TimeSeriesView from 'container/TimeSeriesView/TimeSeriesView';
import { convertDataValueToMs } from 'container/TimeSeriesView/utils';
@@ -115,27 +116,34 @@ function TimeSeries(): JSX.Element {
setYAxisUnit(value);
};

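// A metric counts as selected once any staged query has a non-empty aggregateAttribute key.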
const hasMetricSelected = useMemo(
() => currentQuery.builder.queryData.some((q) => q.aggregateAttribute?.key),
[currentQuery],
);

return (
<div className="meter-time-series-container">
<BuilderUnitsFilter onChange={onUnitChangeHandler} yAxisUnit={yAxisUnit} />
<div className="time-series-container">
{responseData.map((datapoint, index) => (
<div
className="time-series-view-panel"
// eslint-disable-next-line react/no-array-index-key
key={index}
>
<TimeSeriesView
isFilterApplied={false}
isError={queries[index].isError}
isLoading={queries[index].isLoading}
data={datapoint}
dataSource={DataSource.METRICS}
yAxisUnit={yAxisUnit}
panelType={PANEL_TYPES.BAR}
/>
</div>
))}
{!hasMetricSelected && <EmptyMetricsSearch />}
{hasMetricSelected &&
responseData.map((datapoint, index) => (
<div
className="time-series-view-panel"
// eslint-disable-next-line react/no-array-index-key
key={index}
>
<TimeSeriesView
isFilterApplied={false}
isError={queries[index].isError}
isLoading={queries[index].isLoading}
data={datapoint}
dataSource={DataSource.METRICS}
yAxisUnit={yAxisUnit}
panelType={PANEL_TYPES.BAR}
/>
</div>
))}
</div>
</div>
);

@@ -1,13 +1,21 @@
import { Typography } from 'antd';
import { Empty } from 'antd/lib';

export default function EmptyMetricsSearch(): JSX.Element {
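// hasQueryResult distinguishes "query ran but returned no data" from "no query has been run yet".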
interface EmptyMetricsSearchProps {
hasQueryResult?: boolean;
}

export default function EmptyMetricsSearch({
hasQueryResult,
}: EmptyMetricsSearchProps): JSX.Element {
return (
<div className="empty-metrics-search">
<Empty
description={
<Typography.Title level={5}>
Please build and run a valid query to see the result
{hasQueryResult
? 'No data'
: 'Select a metric and run a query to see the results'}
</Typography.Title>
}
/>

@@ -69,7 +69,7 @@ function Explorer(): JSX.Element {
!isMetricUnitsLoading &&
!isMetricUnitsError &&
units.length > 0 &&
units.every((unit) => unit && unit === units[0]),
units.every((unit) => unit === units[0]),
[units, isMetricUnitsLoading, isMetricUnitsError],
);


@@ -28,6 +28,7 @@ import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
import { DataSource } from 'types/common/queryBuilder';
import { GlobalReducer } from 'types/reducer/globalTime';

import EmptyMetricsSearch from './EmptyMetricsSearch';
import { TimeSeriesProps } from './types';
import {
buildUpdateMetricYAxisUnitPayload,
@@ -209,7 +210,7 @@ function TimeSeries({
{showSaveUnitButton && (
<div className="save-unit-container">
<Typography.Text>
Save the selected unit for this metric?
Set the selected unit as the metric unit?
</Typography.Text>
<Button
type="primary"
@@ -229,64 +230,71 @@ function TimeSeries({
'time-series-container': changeLayoutForOneChartPerQuery,
})}
>
{responseData.map((datapoint, index) => {
const isQueryDataItem = index < metricNames.length;
const metricName = isQueryDataItem ? metricNames[index] : undefined;
const metricUnit = isQueryDataItem ? metricUnits[index] : undefined;
{metricNames.length === 0 && <EmptyMetricsSearch />}
{metricNames.length > 0 &&
responseData.map((datapoint, index) => {
const isQueryDataItem = index < metricNames.length;
const metricName = isQueryDataItem ? metricNames[index] : undefined;
const metricUnit = isQueryDataItem ? metricUnits[index] : undefined;

// Show the no unit warning if -
// 1. The metric query is not loading
// 2. The metric units are not loading
// 3. There are more than one metric
// 4. The current metric unit is empty
// 5. Is a queryData item
const isMetricUnitEmpty =
isQueryDataItem &&
!queries[index].isLoading &&
!isMetricUnitsLoading &&
metricUnits.length > 1 &&
!metricUnit &&
metricName;
// Show the no unit warning if -
// 1. The metric query is not loading
// 2. The metric units are not loading
// 3. There are more than one metric
// 4. The current metric unit is empty
// 5. Is a queryData item
const isMetricUnitEmpty =
isQueryDataItem &&
!queries[index].isLoading &&
!isMetricUnitsLoading &&
metricUnits.length > 1 &&
!metricUnit &&
metricName;

const currentYAxisUnit = yAxisUnit || metricUnit;
const currentYAxisUnit = yAxisUnit || metricUnit;

return (
<div
className="time-series-view"
// eslint-disable-next-line react/no-array-index-key
key={index}
>
{isMetricUnitEmpty && metricName && (
<Tooltip
className="no-unit-warning"
title={
<Typography.Text>
This metric does not have a unit. Please set one for it in the{' '}
<Typography.Link
onClick={(): void => handleOpenMetricDetails(metricName)}
>
metric details
</Typography.Link>{' '}
page.
</Typography.Text>
}
>
<AlertTriangle size={16} color={Color.BG_AMBER_400} />
</Tooltip>
)}
<TimeSeriesView
isFilterApplied={false}
isError={queries[index].isError}
isLoading={queries[index].isLoading || isMetricUnitsLoading}
data={datapoint}
yAxisUnit={currentYAxisUnit}
dataSource={DataSource.METRICS}
error={queries[index].error as APIError}
setWarning={setWarning}
/>
</div>
);
})}
return (
<div
className="time-series-view"
// eslint-disable-next-line react/no-array-index-key
key={index}
>
{isMetricUnitEmpty && metricName && (
<Tooltip
className="no-unit-warning"
title={
<Typography.Text>
No unit is set for this metric. You can assign one from the{' '}
<Typography.Link
onClick={(): void => handleOpenMetricDetails(metricName)}
>
metric details
</Typography.Link>{' '}
page.
</Typography.Text>
}
>
<AlertTriangle
size={16}
color={Color.BG_AMBER_400}
role="img"
aria-label="no unit warning"
/>
</Tooltip>
)}
<TimeSeriesView
isFilterApplied={false}
isError={queries[index].isError}
isLoading={queries[index].isLoading || isMetricUnitsLoading}
data={datapoint}
yAxisUnit={currentYAxisUnit}
dataSource={DataSource.METRICS}
error={queries[index].error as APIError}
setWarning={setWarning}
/>
</div>
);
})}
</div>
</>
);

@@ -0,0 +1,19 @@
import { render, screen } from '@testing-library/react';

import EmptyMetricsSearch from '../EmptyMetricsSearch';

describe('EmptyMetricsSearch', () => {
it('shows select metric message when no query has been run', () => {
render(<EmptyMetricsSearch />);

expect(
screen.getByText('Select a metric and run a query to see the results'),
).toBeInTheDocument();
});

it('shows no data message when a query returned empty results', () => {
render(<EmptyMetricsSearch hasQueryResult />);

expect(screen.getByText('No data')).toBeInTheDocument();
});
});
@@ -8,7 +8,7 @@ import {
MetrictypesTemporalityDTO,
MetrictypesTypeDTO,
} from 'api/generated/services/sigNoz.schemas';
import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
import { initialQueriesMap } from 'constants/queryBuilder';
import * as useOptionsMenuHooks from 'container/OptionsMenu';
import * as useUpdateDashboardHooks from 'hooks/dashboard/useUpdateDashboard';
import * as useQueryBuilderHooks from 'hooks/queryBuilder/useQueryBuilder';
@@ -157,26 +157,6 @@ describe('Explorer', () => {
jest.clearAllMocks();
});

it('should render Explorer query builder with metrics datasource selected', () => {
jest.spyOn(useQueryBuilderHooks, 'useQueryBuilder').mockReturnValue({
...mockUseQueryBuilderData,
stagedQuery: initialQueriesMap[DataSource.TRACES],
} as any);

(useSearchParams as jest.Mock).mockReturnValue([
new URLSearchParams({ isOneChartPerQueryEnabled: 'false' }),
mockSetSearchParams,
]);

renderExplorer();

expect(mockUpdateAllQueriesOperators).toHaveBeenCalledWith(
initialQueriesMap[DataSource.METRICS],
PANEL_TYPES.TIME_SERIES,
DataSource.METRICS,
);
});

it('should enable one chart per query toggle when oneChartPerQuery=true in URL', () => {
(useSearchParams as jest.Mock).mockReturnValue([
new URLSearchParams({ isOneChartPerQueryEnabled: 'true' }),
@@ -241,20 +221,46 @@ describe('Explorer', () => {
expect(yAxisUnitSelector).not.toBeInTheDocument();
});

it('should hide y axis unit selector for multiple metrics with different units', () => {
it('one chart per query toggle should be forced on and disabled when multiple metrics have different units', () => {
const mockQueryData = {
...initialQueriesMap[DataSource.METRICS].builder.queryData[0],
aggregateAttribute: {
...(initialQueriesMap[DataSource.METRICS].builder.queryData[0]
.aggregateAttribute as BaseAutocompleteData),
key: 'metric1',
},
};
const mockStagedQueryWithMultipleQueries = {
...initialQueriesMap[DataSource.METRICS],
builder: {
...initialQueriesMap[DataSource.METRICS].builder,
queryData: [mockQueryData, mockQueryData],
},
};

jest.spyOn(useQueryBuilderHooks, 'useQueryBuilder').mockReturnValue(({
...mockUseQueryBuilderData,
stagedQuery: mockStagedQueryWithMultipleQueries,
} as Partial<QueryBuilderContextType>) as QueryBuilderContextType);

jest.spyOn(useGetMetricsHooks, 'useGetMetrics').mockReturnValue({
isLoading: false,
isError: false,
metrics: [MOCK_METRIC_METADATA, MOCK_METRIC_METADATA],
metrics: [
{ ...MOCK_METRIC_METADATA, unit: 'seconds' },
{ ...MOCK_METRIC_METADATA, unit: 'bytes' },
],
});

(useSearchParams as jest.Mock).mockReturnValue([
new URLSearchParams({ isOneChartPerQueryEnabled: 'false' }),
mockSetSearchParams,
]);

renderExplorer();

const yAxisUnitSelector = screen.queryByTestId(Y_AXIS_UNIT_SELECTOR_TEST_ID);
expect(yAxisUnitSelector).not.toBeInTheDocument();

// One chart per query toggle should be disabled
const oneChartPerQueryToggle = screen.getByRole('switch');
expect(oneChartPerQueryToggle).toBeChecked();
expect(oneChartPerQueryToggle).toBeDisabled();
});

@@ -327,4 +333,53 @@ describe('Explorer', () => {
const oneChartPerQueryToggle = screen.getByRole('switch');
expect(oneChartPerQueryToggle).toBeEnabled();
});

it('one chart per query toggle should be enabled when multiple metrics have no unit', () => {
const metricWithNoUnit = {
type: MetrictypesTypeDTO.sum,
description: 'metric without unit',
unit: '',
temporality: MetrictypesTemporalityDTO.cumulative,
isMonotonic: true,
};
const mockQueryData = {
...initialQueriesMap[DataSource.METRICS].builder.queryData[0],
aggregateAttribute: {
...(initialQueriesMap[DataSource.METRICS].builder.queryData[0]
.aggregateAttribute as BaseAutocompleteData),
key: 'metric1',
},
};
const mockStagedQueryWithMultipleQueries = {
...initialQueriesMap[DataSource.METRICS],
builder: {
...initialQueriesMap[DataSource.METRICS].builder,
queryData: [mockQueryData, mockQueryData],
},
};

jest.spyOn(useQueryBuilderHooks, 'useQueryBuilder').mockReturnValue(({
...mockUseQueryBuilderData,
stagedQuery: mockStagedQueryWithMultipleQueries,
} as Partial<QueryBuilderContextType>) as QueryBuilderContextType);

jest.spyOn(useGetMetricsHooks, 'useGetMetrics').mockReturnValue({
isLoading: false,
isError: false,
metrics: [metricWithNoUnit, metricWithNoUnit],
});

(useSearchParams as jest.Mock).mockReturnValue([
new URLSearchParams({ isOneChartPerQueryEnabled: 'false' }),
mockSetSearchParams,
]);

renderExplorer();

const oneChartPerQueryToggle = screen.getByRole('switch');
// Toggle should be enabled (not forced/disabled) since both metrics
// have the same unit (no unit) and should be viewable on the same graph
expect(oneChartPerQueryToggle).toBeEnabled();
expect(oneChartPerQueryToggle).not.toBeChecked();
});
});

@@ -1,4 +1,4 @@
import { render, RenderResult, screen, waitFor } from '@testing-library/react';
import { render, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import * as metricsExplorerHooks from 'api/generated/services/metrics';

@@ -56,7 +56,7 @@ const mockSetYAxisUnit = jest.fn();

function renderTimeSeries(
overrides: Partial<TimeSeriesProps> = {},
): RenderResult {
): ReturnType<typeof render> {
return render(
<TimeSeries
showOneChartPerQuery={false}
@@ -84,45 +84,57 @@ describe('TimeSeries', () => {
} as Partial<UseUpdateMetricMetadataReturnType>) as UseUpdateMetricMetadataReturnType);
});

it('shows select metric message when no metric is selected', () => {
renderTimeSeries({ metricNames: [] });

expect(
screen.getByText('Select a metric and run a query to see the results'),
).toBeInTheDocument();
expect(screen.queryByText('TimeSeriesView')).not.toBeInTheDocument();
});

it('renders chart view when a metric is selected', () => {
renderTimeSeries({
metricNames: ['metric1'],
metricUnits: ['count'],
metrics: [MOCK_METRIC_METADATA],
});

expect(screen.getByText('TimeSeriesView')).toBeInTheDocument();
expect(
screen.queryByText('Select a metric and run a query to see the results'),
).not.toBeInTheDocument();
});

it('should render a warning icon when a metric has no unit among multiple metrics', () => {
const user = userEvent.setup();
const { container } = renderTimeSeries({
renderTimeSeries({
metricUnits: ['', 'count'],
metricNames: ['metric1', 'metric2'],
metrics: [undefined, undefined],
});

const alertIcon = container.querySelector('.no-unit-warning') as HTMLElement;
user.hover(alertIcon);
waitFor(() =>
expect(
screen.findByText('This metric does not have a unit'),
).toBeInTheDocument(),
);
expect(
screen.getByRole('img', { name: 'no unit warning' }),
).toBeInTheDocument();
});

it('clicking on warning icon tooltip should open metric details modal', async () => {
it('warning tooltip shows metric details link', async () => {
const user = userEvent.setup();
const { container } = renderTimeSeries({
renderTimeSeries({
metricUnits: ['', 'count'],
metricNames: ['metric1', 'metric2'],
metrics: [MOCK_METRIC_METADATA, MOCK_METRIC_METADATA],
yAxisUnit: 'seconds',
});

const alertIcon = container.querySelector('.no-unit-warning') as HTMLElement;
user.hover(alertIcon);
const alertIcon = screen.getByRole('img', { name: 'no unit warning' });
await user.hover(alertIcon);

const metricDetailsLink = await screen.findByText('metric details');
user.click(metricDetailsLink);

waitFor(() =>
expect(mockSetIsMetricDetailsOpen).toHaveBeenCalledWith('metric1'),
);
expect(await screen.findByText('metric details')).toBeInTheDocument();
});

it('shows Save unit button when metric had no unit but one is selected', async () => {
const { findByText, getByRole } = renderTimeSeries({
it('shows save unit prompt with enabled button when metric has no unit and a unit is selected', async () => {
renderTimeSeries({
metricUnits: [undefined],
metricNames: ['metric1'],
metrics: [MOCK_METRIC_METADATA],
@@ -131,38 +143,10 @@ describe('TimeSeries', () => {
});

expect(
await findByText('Save the selected unit for this metric?'),
await screen.findByText('Set the selected unit as the metric unit?'),
).toBeInTheDocument();

const yesButton = getByRole('button', { name: 'Yes' });
expect(yesButton).toBeInTheDocument();
const yesButton = screen.getByRole('button', { name: 'Yes' });
expect(yesButton).toBeEnabled();
});

it('clicking on save unit button should update metric metadata', async () => {
const user = userEvent.setup();
const { getByRole } = renderTimeSeries({
metricUnits: [''],
metricNames: ['metric1'],
metrics: [MOCK_METRIC_METADATA],
yAxisUnit: 'seconds',
showYAxisUnitSelector: true,
});

const yesButton = getByRole('button', { name: /Yes/i });
await user.click(yesButton);

expect(mockUpdateMetricMetadata).toHaveBeenCalledWith(
{
pathParams: {
metricName: 'metric1',
},
data: expect.objectContaining({ unit: 'seconds' }),
},
expect.objectContaining({
onSuccess: expect.any(Function),
onError: expect.any(Function),
}),
);
});
});

@@ -139,4 +139,14 @@ describe('getMetricUnits', () => {
expect(result).toHaveLength(1);
expect(result[0]).toBe('s');
});

it('should return undefined for metrics with no unit', () => {
const result = getMetricUnits([
{ ...MOCK_METRIC_METADATA, unit: '' },
{ ...MOCK_METRIC_METADATA, unit: '' },
]);
expect(result).toHaveLength(2);
expect(result[0]).toBeUndefined();
expect(result[1]).toBeUndefined();
});
});

@@ -1,7 +1,7 @@
import { useState } from 'react';
import { Typography } from 'antd';
import { initialQueriesMap } from 'constants/queryBuilder';
import { AggregatorFilter } from 'container/QueryBuilder/filters';
import { MetricNameSelector } from 'container/QueryBuilder/filters';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { DataSource } from 'types/common/queryBuilder';

@@ -27,7 +27,7 @@ function MetricNameSearch({
className="inspect-metrics-input-group metric-name-search"
>
<Typography.Text>From</Typography.Text>
<AggregatorFilter
<MetricNameSelector
defaultValue={searchText ?? ''}
query={initialQueriesMap[DataSource.METRICS].builder.queryData[0]}
onSelect={handleSetMetricName}

@@ -1,8 +1,9 @@
import { QueryClient, QueryClientProvider } from 'react-query';
// eslint-disable-next-line no-restricted-imports
import { Provider } from 'react-redux';
import { render, screen } from '@testing-library/react';
import { fireEvent, render, screen, within } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import * as metricsService from 'api/generated/services/metrics';
import { MetricType } from 'api/metricsExplorer/getMetricsList';
import * as appContextHooks from 'providers/App/App';
import store from 'store';
@@ -23,27 +24,31 @@ jest.mock('react-router-dom', () => ({
}),
}));

jest.mock('container/QueryBuilder/filters', () => ({
AggregatorFilter: ({ onSelect, onChange, defaultValue }: any): JSX.Element => (
<div data-testid="mock-aggregator-filter">
<input
data-testid="metric-name-input"
defaultValue={defaultValue}
onChange={(e: React.ChangeEvent<HTMLInputElement>): void =>
onChange({ key: e.target.value })
}
/>
<button
type="button"
data-testid="select-metric-button"
onClick={(): void => onSelect({ key: 'test_metric_2' })}
>
Select Metric
</button>
</div>
),
jest.mock('api/generated/services/metrics', () => ({
useListMetrics: jest.fn().mockReturnValue({
isFetching: false,
isError: false,
data: { data: { metrics: [] } },
}),
useUpdateMetricMetadata: jest.fn().mockReturnValue({
mutate: jest.fn(),
isLoading: false,
}),
}));

jest.mock('hooks/useDebounce', () => ({
__esModule: true,
default: <T,>(value: T): T => value,
}));

jest.mock(
'container/QueryBuilder/filters/QueryBuilderSearch/OptionRenderer',
() => ({
__esModule: true,
default: ({ value }: { value: string }): JSX.Element => <span>{value}</span>,
}),
);

jest.spyOn(appContextHooks, 'useAppContext').mockReturnValue({
user: {
role: 'admin',
@@ -123,6 +128,24 @@ describe('QueryBuilder', () => {

it('should call setCurrentMetricName when metric name is selected', async () => {
const user = userEvent.setup();
(metricsService.useListMetrics as jest.Mock).mockReturnValue({
isFetching: false,
isError: false,
data: {
data: {
metrics: [
{
metricName: 'test_metric_2',
type: 'Sum',
isMonotonic: true,
description: '',
temporality: 'cumulative',
unit: '',
},
],
},
},
});

render(
<QueryClientProvider client={queryClient}>
@@ -137,8 +160,12 @@ describe('QueryBuilder', () => {

expect(screen.getByText('From')).toBeInTheDocument();

const selectButton = screen.getByTestId('select-metric-button');
await user.click(selectButton);
const input = within(metricNameSearch).getByRole('combobox');
fireEvent.change(input, { target: { value: 'test_metric_2' } });

const options = document.querySelectorAll('.ant-select-item');
expect(options.length).toBeGreaterThan(0);
await user.click(options[0] as HTMLElement);

expect(mockSetCurrentMetricName).toHaveBeenCalledWith('test_metric_2');
});

@@ -24,6 +24,7 @@ import {
AggregatorFilter,
GroupByFilter,
HavingFilter,
MetricNameSelector,
OperatorsSelect,
OrderByFilter,
ReduceToFilter,
@@ -403,7 +404,7 @@ export const Query = memo(function Query({
)}

<Col flex="auto">
<AggregatorFilter
<MetricNameSelector
onChange={handleChangeAggregatorAttribute}
query={query}
/>

@@ -0,0 +1,6 @@
.metric-name-selector {
.ant-select-selection-placeholder {
color: var(--bg-slate-200);
font-style: italic;
}
}
@@ -0,0 +1,887 @@
import { useEffect, useState } from 'react';
import { fireEvent, render, screen, within } from '@testing-library/react';
import {
MetricsexplorertypesListMetricDTO,
MetrictypesTypeDTO,
} from 'api/generated/services/sigNoz.schemas';
import { ENTITY_VERSION_V5 } from 'constants/app';
import { ATTRIBUTE_TYPES } from 'constants/queryBuilder';
import { useQueryOperations } from 'hooks/queryBuilder/useQueryBuilderOperations';
import { DataTypes } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
import { MetricAggregation } from 'types/api/v5/queryRange';
import { DataSource, ReduceOperators } from 'types/common/queryBuilder';

import { MetricNameSelector } from './MetricNameSelector';

const mockUseListMetrics = jest.fn();
jest.mock('api/generated/services/metrics', () => ({
useListMetrics: (...args: unknown[]): ReturnType<typeof mockUseListMetrics> =>
mockUseListMetrics(...args),
}));

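// Debounce is stubbed to the identity function so typed input takes effect immediately in tests.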
jest.mock('hooks/useDebounce', () => ({
__esModule: true,
default: <T,>(value: T): T => value,
}));

jest.mock('../QueryBuilderSearch/OptionRenderer', () => ({
__esModule: true,
default: ({ value }: { value: string }): JSX.Element => <span>{value}</span>,
}));

// Ref lets StatefulMetricQueryHarness wire handleSetQueryData to real state,
// while other tests keep the default no-op mock.
const handleSetQueryDataRef: {
current: (index: number, query: IBuilderQuery) => void;
} = {
current: jest.fn(),
};

jest.mock('hooks/queryBuilder/useQueryBuilder', () => ({
useQueryBuilder: (): Record<string, unknown> => ({
handleSetQueryData: (index: number, query: IBuilderQuery): void =>
handleSetQueryDataRef.current(index, query),
handleSetTraceOperatorData: jest.fn(),
handleSetFormulaData: jest.fn(),
removeQueryBuilderEntityByIndex: jest.fn(),
panelType: 'TIME_SERIES',
initialDataSource: DataSource.METRICS,
currentQuery: {
builder: { queryData: [], queryFormulas: [], queryTraceOperator: [] },
queryType: 'builder',
},
setLastUsedQuery: jest.fn(),
redirectWithQueryBuilderData: jest.fn(),
}),
}));

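// Factory for list-metric DTOs; overrides let each test set only the fields it cares about.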
function makeMetric(
overrides: Partial<MetricsexplorertypesListMetricDTO> = {},
): MetricsexplorertypesListMetricDTO {
return {
metricName: 'http_requests_total',
type: MetrictypesTypeDTO.sum,
isMonotonic: true,
description: '',
temporality: 'cumulative' as never,
unit: '',
...overrides,
};
}

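// Minimal builder query for tests; only the fields exercised here matter,
// hence the cast through IBuilderQuery.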
function makeQuery(overrides: Partial<IBuilderQuery> = {}): IBuilderQuery {
return {
dataSource: DataSource.METRICS,
queryName: 'A',
aggregateOperator: 'count',
aggregateAttribute: { key: '', type: '', dataType: DataTypes.Float64 },
timeAggregation: 'avg',
spaceAggregation: 'sum',
filter: { expression: '' },
aggregations: [],
functions: [],
filters: { items: [], op: 'AND' },
expression: 'A',
disabled: false,
stepInterval: null,
having: [],
limit: null,
orderBy: [],
groupBy: [],
legend: '',
reduceTo: ReduceOperators.AVG,
...overrides,
} as IBuilderQuery;
}

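// Stubs the useListMetrics hook to resolve with the given metrics.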
function returnMetrics(
metrics: MetricsexplorertypesListMetricDTO[],
overrides: Record<string, unknown> = {},
): void {
mockUseListMetrics.mockReturnValue({
isFetching: false,
isError: false,
data: { data: { metrics } },
queryKey: ['/api/v2/metrics'],
...overrides,
});
}

// Harness renders MetricNameSelector together with the time/space aggregation
// option lists so tests can assert on them.
function MetricQueryHarness({ query }: { query: IBuilderQuery }): JSX.Element {
const {
handleChangeAggregatorAttribute,
operators,
spaceAggregationOptions,
} = useQueryOperations({
query,
index: 0,
entityVersion: ENTITY_VERSION_V5,
});

return (
<div>
<MetricNameSelector
query={query}
onChange={handleChangeAggregatorAttribute}
/>
<ul data-testid="time-agg-options">
{operators.map((op) => (
<li key={op.value}>{op.label}</li>
))}
</ul>
<ul data-testid="space-agg-options">
{spaceAggregationOptions.map((op) => (
<li key={op.value}>{op.label}</li>
))}
</ul>
</div>
);
}

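// Reads the rendered option labels out of one of the harness lists.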
function getOptionLabels(testId: string): string[] {
const list = screen.getByTestId(testId);
const items = within(list).queryAllByRole('listitem');
return items.map((el) => el.textContent || '');
}

describe('MetricNameSelector', () => {
beforeEach(() => {
jest.clearAllMocks();
handleSetQueryDataRef.current = jest.fn();
returnMetrics([]);
});

it('shows metric names from API as dropdown options', () => {
returnMetrics([
makeMetric({ metricName: 'http_requests_total' }),
makeMetric({
metricName: 'cpu_usage_percent',
type: MetrictypesTypeDTO.gauge,
}),
]);

render(<MetricNameSelector query={makeQuery()} onChange={jest.fn()} />);

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'h' } });

expect(
screen.getAllByText('http_requests_total').length,
).toBeGreaterThanOrEqual(1);
expect(
screen.getAllByText('cpu_usage_percent').length,
).toBeGreaterThanOrEqual(1);
});

it('retains typed metric name in input after blur', () => {
returnMetrics([makeMetric({ metricName: 'http_requests_total' })]);

render(<MetricNameSelector query={makeQuery()} onChange={jest.fn()} />);

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'http_requests_total' } });
fireEvent.blur(input);

expect(input).toHaveValue('http_requests_total');
});

it('shows error message when API request fails', () => {
mockUseListMetrics.mockReturnValue({
isFetching: false,
isError: true,
data: undefined,
queryKey: ['/api/v2/metrics'],
});

render(<MetricNameSelector query={makeQuery()} onChange={jest.fn()} />);

const input = screen.getByRole('combobox');
fireEvent.focus(input);
fireEvent.change(input, { target: { value: 'test' } });

expect(screen.getByText('Failed to load metrics')).toBeInTheDocument();
});

it('shows loading spinner while fetching metrics', () => {
mockUseListMetrics.mockReturnValue({
isFetching: true,
isError: false,
data: undefined,
queryKey: ['/api/v2/metrics'],
});

const { container } = render(
<MetricNameSelector query={makeQuery()} onChange={jest.fn()} />,
);

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'test' } });

expect(container.querySelector('.ant-spin-spinning')).toBeInTheDocument();
});
});

describe('selecting a metric type updates the aggregation options', () => {
beforeEach(() => {
jest.clearAllMocks();
handleSetQueryDataRef.current = jest.fn();
returnMetrics([]);
});

it('Sum metric shows Rate/Increase time options and Sum/Avg/Min/Max space options', () => {
returnMetrics([
makeMetric({
metricName: 'http_requests_total',
type: MetrictypesTypeDTO.sum,
isMonotonic: true,
}),
]);

render(<MetricQueryHarness query={makeQuery()} />);

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'http_requests_total' } });
fireEvent.blur(input);

expect(getOptionLabels('time-agg-options')).toEqual(['Rate', 'Increase']);
expect(getOptionLabels('space-agg-options')).toEqual([
'Sum',
'Avg',
'Min',
'Max',
]);
});

it('Gauge metric shows Latest/Sum/Avg/Min/Max/Count/Count Distinct time options and Sum/Avg/Min/Max space options', () => {
returnMetrics([
makeMetric({
metricName: 'cpu_usage_percent',
type: MetrictypesTypeDTO.gauge,
}),
]);

render(<MetricQueryHarness query={makeQuery()} />);

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'cpu_usage_percent' } });
fireEvent.blur(input);

expect(getOptionLabels('time-agg-options')).toEqual([
'Latest',
'Sum',
'Avg',
'Min',
'Max',
'Count',
'Count Distinct',
]);
expect(getOptionLabels('space-agg-options')).toEqual([
'Sum',
'Avg',
'Min',
'Max',
]);
});

it('non-monotonic Sum metric is treated as Gauge', () => {
returnMetrics([
makeMetric({
metricName: 'active_connections',
type: MetrictypesTypeDTO.sum,
isMonotonic: false,
}),
]);

render(<MetricQueryHarness query={makeQuery()} />);

const input = screen.getByRole('combobox');
fireEvent.change(input, {
target: { value: 'active_connections' },
});
fireEvent.blur(input);

expect(getOptionLabels('time-agg-options')).toEqual([
'Latest',
'Sum',
'Avg',
'Min',
'Max',
'Count',
'Count Distinct',
]);
expect(getOptionLabels('space-agg-options')).toEqual([
'Sum',
'Avg',
'Min',
'Max',
]);
});

it('Histogram metric shows no time options and P50–P99 space options', () => {
returnMetrics([
makeMetric({
metricName: 'request_duration_seconds',
type: MetrictypesTypeDTO.histogram,
}),
]);

render(<MetricQueryHarness query={makeQuery()} />);

const input = screen.getByRole('combobox');
fireEvent.change(input, {
target: { value: 'request_duration_seconds' },
});
fireEvent.blur(input);

expect(getOptionLabels('time-agg-options')).toEqual([]);
expect(getOptionLabels('space-agg-options')).toEqual([
'P50',
'P75',
'P90',
'P95',
'P99',
]);
});

it('ExponentialHistogram metric shows no time options and P50–P99 space options', () => {
returnMetrics([
makeMetric({
metricName: 'request_duration_exp',
type: MetrictypesTypeDTO.exponentialhistogram,
}),
]);

render(<MetricQueryHarness query={makeQuery()} />);

const input = screen.getByRole('combobox');
fireEvent.change(input, {
target: { value: 'request_duration_exp' },
});
fireEvent.blur(input);

expect(getOptionLabels('time-agg-options')).toEqual([]);
expect(getOptionLabels('space-agg-options')).toEqual([
'P50',
'P75',
'P90',
'P95',
'P99',
]);
});

it('unknown metric (typed name not in API results) shows all time and space options', () => {
returnMetrics([makeMetric({ metricName: 'known_metric' })]);

render(<MetricQueryHarness query={makeQuery()} />);

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'unknown_metric' } });
fireEvent.blur(input);

expect(getOptionLabels('time-agg-options')).toEqual([
'Max',
'Min',
'Sum',
'Avg',
'Count',
'Rate',
'Increase',
]);
expect(getOptionLabels('space-agg-options')).toEqual([
'Sum',
'Avg',
'Min',
'Max',
'P50',
'P75',
'P90',
'P95',
'P99',
]);
});
});

// These tests depend on the previously selected metric, so we use a stateful
// harness that tracks previousMetricInfo across metric selections.
function StatefulMetricQueryHarness({
initialQuery,
}: {
initialQuery: IBuilderQuery;
}): JSX.Element {
const [query, setQuery] = useState(initialQuery);

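// Bind the shared ref to this harness's state for the duration of the test,
// restoring the no-op mock on unmount.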
useEffect(() => {
handleSetQueryDataRef.current = (
_index: number,
newQuery: IBuilderQuery,
): void => {
setQuery(newQuery);
};
return (): void => {
handleSetQueryDataRef.current = jest.fn();
};
}, []);

const {
handleChangeAggregatorAttribute,
operators,
spaceAggregationOptions,
} = useQueryOperations({
query,
index: 0,
entityVersion: ENTITY_VERSION_V5,
});

const currentAggregation = query.aggregations?.[0] as MetricAggregation;

return (
<div>
<MetricNameSelector
query={query}
onChange={handleChangeAggregatorAttribute}
/>
<ul data-testid="time-agg-options">
{operators.map((op) => (
<li key={op.value}>{op.label}</li>
))}
</ul>
<ul data-testid="space-agg-options">
{spaceAggregationOptions.map((op) => (
<li key={op.value}>{op.label}</li>
))}
</ul>
<div data-testid="selected-time-agg">
{currentAggregation?.timeAggregation || ''}
</div>
<div data-testid="selected-space-agg">
{currentAggregation?.spaceAggregation || ''}
</div>
<div data-testid="selected-metric-name">
{currentAggregation?.metricName || ''}
</div>
</div>
);
}

describe('switching between metrics of the same type preserves aggregation settings', () => {
beforeEach(() => {
jest.clearAllMocks();
handleSetQueryDataRef.current = jest.fn();
returnMetrics([]);
});

it('Sum: preserves non-default increase/avg when switching to another Sum metric', () => {
returnMetrics([
makeMetric({
metricName: 'metric_a',
type: MetrictypesTypeDTO.sum,
isMonotonic: true,
}),
makeMetric({
metricName: 'metric_b',
type: MetrictypesTypeDTO.sum,
isMonotonic: true,
}),
]);

render(
<StatefulMetricQueryHarness
initialQuery={makeQuery({
aggregateAttribute: {
key: 'metric_a',
type: ATTRIBUTE_TYPES.SUM,
dataType: DataTypes.Float64,
},
aggregations: [
{
timeAggregation: 'increase',
spaceAggregation: 'avg',
metricName: 'metric_a',
temporality: '',
},
] as MetricAggregation[],
})}
/>,
);

expect(screen.getByTestId('selected-time-agg')).toHaveTextContent('increase');
expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('avg');

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'metric_b' } });
fireEvent.blur(input);

expect(screen.getByTestId('selected-time-agg')).toHaveTextContent('increase');
expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('avg');
expect(screen.getByTestId('selected-metric-name')).toHaveTextContent(
'metric_b',
);
});

it('Gauge: preserves non-default min/max when switching to another Gauge metric', () => {
returnMetrics([
makeMetric({
metricName: 'cpu_usage',
type: MetrictypesTypeDTO.gauge,
}),
makeMetric({
metricName: 'mem_usage',
type: MetrictypesTypeDTO.gauge,
}),
]);

render(
<StatefulMetricQueryHarness
initialQuery={makeQuery({
aggregateAttribute: {
key: 'cpu_usage',
type: ATTRIBUTE_TYPES.GAUGE,
dataType: DataTypes.Float64,
},
aggregations: [
{
timeAggregation: 'min',
spaceAggregation: 'max',
metricName: 'cpu_usage',
temporality: '',
},
] as MetricAggregation[],
})}
/>,
);

expect(screen.getByTestId('selected-time-agg')).toHaveTextContent('min');
expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('max');

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'mem_usage' } });
fireEvent.blur(input);

expect(screen.getByTestId('selected-time-agg')).toHaveTextContent('min');
expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('max');
expect(screen.getByTestId('selected-metric-name')).toHaveTextContent(
'mem_usage',
);
});

it('Histogram: preserves non-default p99 when switching to another Histogram metric', () => {
returnMetrics([
makeMetric({
metricName: 'req_duration',
type: MetrictypesTypeDTO.histogram,
}),
makeMetric({
metricName: 'db_latency',
type: MetrictypesTypeDTO.histogram,
}),
]);

render(
<StatefulMetricQueryHarness
initialQuery={makeQuery({
aggregateAttribute: {
key: 'req_duration',
type: ATTRIBUTE_TYPES.HISTOGRAM,
dataType: DataTypes.Float64,
},
aggregations: [
{
timeAggregation: '',
spaceAggregation: 'p99',
metricName: 'req_duration',
temporality: '',
},
] as MetricAggregation[],
})}
/>,
);

expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('p99');

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'db_latency' } });
fireEvent.blur(input);

expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('p99');
expect(screen.getByTestId('selected-metric-name')).toHaveTextContent(
'db_latency',
);
});

it('ExponentialHistogram: preserves non-default p75 when switching to another ExponentialHistogram metric', () => {
returnMetrics([
makeMetric({
metricName: 'exp_hist_a',
type: MetrictypesTypeDTO.exponentialhistogram,
}),
makeMetric({
metricName: 'exp_hist_b',
type: MetrictypesTypeDTO.exponentialhistogram,
}),
]);

render(
<StatefulMetricQueryHarness
initialQuery={makeQuery({
aggregateAttribute: {
key: 'exp_hist_a',
type: ATTRIBUTE_TYPES.EXPONENTIAL_HISTOGRAM,
dataType: DataTypes.Float64,
},
aggregations: [
{
timeAggregation: '',
spaceAggregation: 'p75',
metricName: 'exp_hist_a',
temporality: '',
},
] as MetricAggregation[],
})}
/>,
);

expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('p75');

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'exp_hist_b' } });
fireEvent.blur(input);

expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('p75');
expect(screen.getByTestId('selected-metric-name')).toHaveTextContent(
'exp_hist_b',
);
});
});

describe('switching to a different metric type resets aggregation to new defaults', () => {
beforeEach(() => {
jest.clearAllMocks();
handleSetQueryDataRef.current = jest.fn();
returnMetrics([]);
});

it('Sum to Gauge: resets from increase/avg to the Gauge defaults avg/avg', () => {
returnMetrics([
makeMetric({
metricName: 'sum_metric',
type: MetrictypesTypeDTO.sum,
isMonotonic: true,
}),
makeMetric({
metricName: 'gauge_metric',
type: MetrictypesTypeDTO.gauge,
}),
]);

render(
<StatefulMetricQueryHarness
initialQuery={makeQuery({
aggregateAttribute: {
key: 'sum_metric',
type: ATTRIBUTE_TYPES.SUM,
dataType: DataTypes.Float64,
},
aggregations: [
{
timeAggregation: 'increase',
spaceAggregation: 'avg',
metricName: 'sum_metric',
temporality: '',
},
] as MetricAggregation[],
})}
/>,
);

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'gauge_metric' } });
fireEvent.blur(input);

expect(screen.getByTestId('selected-time-agg')).toHaveTextContent('avg');
expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('avg');
expect(screen.getByTestId('selected-metric-name')).toHaveTextContent(
'gauge_metric',
);
});

it('Gauge to Histogram: resets from min/max to the Histogram defaults (no time, p90 space)', () => {
returnMetrics([
makeMetric({
metricName: 'gauge_metric',
type: MetrictypesTypeDTO.gauge,
}),
makeMetric({
metricName: 'hist_metric',
type: MetrictypesTypeDTO.histogram,
}),
]);

render(
<StatefulMetricQueryHarness
initialQuery={makeQuery({
aggregateAttribute: {
key: 'gauge_metric',
type: ATTRIBUTE_TYPES.GAUGE,
dataType: DataTypes.Float64,
},
aggregations: [
{
timeAggregation: 'min',
spaceAggregation: 'max',
metricName: 'gauge_metric',
temporality: '',
},
] as MetricAggregation[],
})}
/>,
);

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'hist_metric' } });
fireEvent.blur(input);

expect(screen.getByTestId('selected-time-agg')).toHaveTextContent('');
expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('p90');
expect(screen.getByTestId('selected-metric-name')).toHaveTextContent(
'hist_metric',
);
});

it('Histogram to Sum: resets from p99 to the Sum defaults rate/sum', () => {
returnMetrics([
makeMetric({
metricName: 'hist_metric',
type: MetrictypesTypeDTO.histogram,
}),
makeMetric({
metricName: 'sum_metric',
type: MetrictypesTypeDTO.sum,
isMonotonic: true,
}),
]);

render(
<StatefulMetricQueryHarness
initialQuery={makeQuery({
aggregateAttribute: {
key: 'hist_metric',
type: ATTRIBUTE_TYPES.HISTOGRAM,
dataType: DataTypes.Float64,
},
aggregations: [
{
timeAggregation: '',
spaceAggregation: 'p99',
metricName: 'hist_metric',
temporality: '',
},
] as MetricAggregation[],
})}
/>,
);

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'sum_metric' } });
fireEvent.blur(input);

expect(screen.getByTestId('selected-time-agg')).toHaveTextContent('rate');
expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('sum');
expect(screen.getByTestId('selected-metric-name')).toHaveTextContent(
'sum_metric',
);
});
});

describe('typed metric not in search results is committed with unknown defaults', () => {
beforeEach(() => {
jest.clearAllMocks();
handleSetQueryDataRef.current = jest.fn();
returnMetrics([]);
});

it('Gauge to unknown metric: resets from Gauge aggregations to unknown defaults (avg/avg)', () => {
returnMetrics([
makeMetric({
metricName: 'cpu_usage',
type: MetrictypesTypeDTO.gauge,
}),
]);

render(
<StatefulMetricQueryHarness
initialQuery={makeQuery({
aggregateAttribute: {
key: 'cpu_usage',
type: ATTRIBUTE_TYPES.GAUGE,
dataType: DataTypes.Float64,
},
aggregations: [
{
timeAggregation: 'min',
spaceAggregation: 'max',
metricName: 'cpu_usage',
temporality: '',
},
] as MetricAggregation[],
})}
/>,
);

expect(screen.getByTestId('selected-time-agg')).toHaveTextContent('min');
expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('max');

const input = screen.getByRole('combobox');
fireEvent.change(input, { target: { value: 'unknown_metric' } });
fireEvent.blur(input);

// Metric not in search results is committed with empty type resets to unknown defaults
|
||||
expect(screen.getByTestId('selected-time-agg')).toHaveTextContent('avg');
|
||||
expect(screen.getByTestId('selected-space-agg')).toHaveTextContent('avg');
|
||||
expect(screen.getByTestId('selected-metric-name')).toHaveTextContent(
|
||||
'unknown_metric',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Summary metric type is treated as Gauge', () => {
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
handleSetQueryDataRef.current = jest.fn();
|
||||
returnMetrics([]);
|
||||
});
|
||||
|
||||
it('selecting a Summary metric shows Gauge aggregation options', () => {
|
||||
returnMetrics([
|
||||
makeMetric({
|
||||
metricName: 'rpc_duration_summary',
|
||||
type: MetrictypesTypeDTO.summary,
|
||||
}),
|
||||
]);
|
||||
|
||||
render(<MetricQueryHarness query={makeQuery()} />);
|
||||
|
||||
const input = screen.getByRole('combobox');
|
||||
fireEvent.change(input, {
|
||||
target: { value: 'rpc_duration_summary' },
|
||||
});
|
||||
fireEvent.blur(input);
|
||||
|
||||
expect(getOptionLabels('time-agg-options')).toEqual([
|
||||
'Latest',
|
||||
'Sum',
|
||||
'Avg',
|
||||
'Min',
|
||||
'Max',
|
||||
'Count',
|
||||
'Count Distinct',
|
||||
]);
|
||||
expect(getOptionLabels('space-agg-options')).toEqual([
|
||||
'Sum',
|
||||
'Avg',
|
||||
'Min',
|
||||
'Max',
|
||||
]);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,282 @@
import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { flushSync } from 'react-dom';
import { AutoComplete, Spin, Typography } from 'antd';
import { useListMetrics } from 'api/generated/services/metrics';
import {
	MetricsexplorertypesListMetricDTO,
	MetrictypesTypeDTO,
} from 'api/generated/services/sigNoz.schemas';
import { ATTRIBUTE_TYPES } from 'constants/queryBuilder';
import { DEBOUNCE_DELAY } from 'constants/queryBuilderFilterConfig';
import useDebounce from 'hooks/useDebounce';
import {
	BaseAutocompleteData,
	DataTypes,
} from 'types/api/queryBuilder/queryAutocompleteResponse';
import { IBuilderQuery } from 'types/api/queryBuilder/queryBuilderData';
import { MetricAggregation } from 'types/api/v5/queryRange';
import { ExtendedSelectOption } from 'types/common/select';
import { popupContainer } from 'utils/selectPopupContainer';

import { selectStyle } from '../QueryBuilderSearch/config';
import OptionRenderer from '../QueryBuilderSearch/OptionRenderer';

import './MetricNameSelector.styles.scss';

// N.B. on the metric name selector behaviour.
//
// Metric aggregation options resolution:
// The component maintains a ref (metricsRef) of the latest API results.
// When the user commits a metric name (via dropdown select, blur, or Cmd+Enter),
// resolveMetricFromText looks up the metric in metricsRef to determine its type
// (Sum, Gauge, Histogram, etc.). If the metric isn't found (e.g. the user typed
// a name before the debounced search returned), the type is empty and downstream
// treats it as unknown.
//
// Selection handling:
// - Dropdown select: the user picks from the dropdown; the type is always resolved
//   since the option came from the current API results.
// - Blur: the user typed a name and tabbed/clicked away without selecting from
//   the dropdown. If the name differs from the current metric, it's resolved
//   and committed. If the input is empty, it resets to the current metric name.
// - Cmd/Ctrl+Enter: resolves the typed name and commits it using flushSync
//   so the state update is processed synchronously before QueryBuilderV2's
//   onKeyDownCapture fires handleRunQuery. Uses document-level capture phase
//   to run before React's root-level event dispatch. However, one case still
//   needs to be handled here. TODO(srikanthccv): Enter pressed before the
//   network request completes.
//
// Edit mode:
// When a saved query is loaded, the metric name may be set via aggregations
// but aggregateAttribute.type may be missing. Once the API returns metric data,
// the component calls onChange with isEditMode=true to backfill the type without
// resetting aggregation options.
//
// Signal source:
// When signalSource is 'meter', the API is filtered to meter metrics only.
// Changing signalSource clears the input and search text.

function getAttributeType(
	metric: MetricsexplorertypesListMetricDTO,
): ATTRIBUTE_TYPES | '' {
	if (metric.type === MetrictypesTypeDTO.sum && !metric.isMonotonic) {
		return ATTRIBUTE_TYPES.GAUGE;
	}

	const mapping: Record<MetrictypesTypeDTO, ATTRIBUTE_TYPES> = {
		[MetrictypesTypeDTO.sum]: ATTRIBUTE_TYPES.SUM,
		[MetrictypesTypeDTO.gauge]: ATTRIBUTE_TYPES.GAUGE,
		[MetrictypesTypeDTO.histogram]: ATTRIBUTE_TYPES.HISTOGRAM,
		[MetrictypesTypeDTO.summary]: ATTRIBUTE_TYPES.GAUGE,
		[MetrictypesTypeDTO.exponentialhistogram]:
			ATTRIBUTE_TYPES.EXPONENTIAL_HISTOGRAM,
	};

	return mapping[metric.type] || '';
}

function toAutocompleteData(
	metricName: string,
	type: string,
): BaseAutocompleteData {
	return { key: metricName, type, dataType: DataTypes.Float64 };
}

export type MetricNameSelectorProps = {
	query: IBuilderQuery;
	onChange: (value: BaseAutocompleteData, isEditMode?: boolean) => void;
	disabled?: boolean;
	defaultValue?: string;
	onSelect?: (value: BaseAutocompleteData) => void;
	signalSource?: 'meter' | '';
};

export const MetricNameSelector = memo(function MetricNameSelector({
	query,
	onChange,
	disabled,
	defaultValue,
	onSelect,
	signalSource,
}: MetricNameSelectorProps): JSX.Element {
	const currentMetricName =
		(query.aggregations?.[0] as MetricAggregation)?.metricName ||
		query.aggregateAttribute?.key ||
		'';

	const [inputValue, setInputValue] = useState<string>(
		defaultValue || currentMetricName,
	);
	const [searchText, setSearchText] = useState<string>(currentMetricName);

	const metricsRef = useRef<MetricsexplorertypesListMetricDTO[]>([]);
	const selectedFromDropdownRef = useRef(false);
	const prevSignalSourceRef = useRef(signalSource);

	useEffect(() => {
		setInputValue(defaultValue || currentMetricName);
	}, [defaultValue, currentMetricName]);

	useEffect(() => {
		if (prevSignalSourceRef.current !== signalSource) {
			prevSignalSourceRef.current = signalSource;
			setSearchText('');
			setInputValue('');
		}
	}, [signalSource]);

	const debouncedValue = useDebounce(searchText, DEBOUNCE_DELAY);

	const { isFetching, isError, data: listMetricsData } = useListMetrics(
		{
			searchText: debouncedValue,
			limit: 100,
			source: signalSource || undefined,
		} as Record<string, unknown>,
		{
			query: {
				keepPreviousData: false,
				retry: 2,
			},
		},
	);

	const metrics = useMemo(() => listMetricsData?.data?.metrics ?? [], [
		listMetricsData,
	]);

	useEffect(() => {
		metricsRef.current = metrics;
	}, [metrics]);

	const optionsData = useMemo((): ExtendedSelectOption[] => {
		if (!metrics.length) {
			return [];
		}

		return metrics.map((metric) => ({
			label: (
				<OptionRenderer
					label={metric.metricName}
					value={metric.metricName}
					dataType={DataTypes.Float64}
					type={getAttributeType(metric) || ''}
				/>
			),
			value: metric.metricName,
			key: metric.metricName,
		}));
	}, [metrics]);

	useEffect(() => {
		const metricName = (query.aggregations?.[0] as MetricAggregation)?.metricName;
		const hasAggregateAttributeType = query.aggregateAttribute?.type;

		if (metricName && !hasAggregateAttributeType && metrics.length > 0) {
			const found = metrics.find((m) => m.metricName === metricName);
			if (found) {
				onChange(
					toAutocompleteData(found.metricName, getAttributeType(found)),
					true,
				);
			}
		}
	}, [metrics, query.aggregations, query.aggregateAttribute?.type, onChange]);

	const resolveMetricFromText = useCallback(
		(text: string): BaseAutocompleteData => {
			const found = metricsRef.current.find((m) => m.metricName === text);
			if (found) {
				return toAutocompleteData(found.metricName, getAttributeType(found));
			}
			return toAutocompleteData(text, '');
		},
		[],
	);

	const placeholder = useMemo(() => {
		if (signalSource === 'meter') {
			return 'Search for a meter metric...';
		}
		return 'Search for a metric...';
	}, [signalSource]);

	const handleChange = useCallback((value: string): void => {
		setInputValue(value);
	}, []);

	const handleSearch = useCallback((value: string): void => {
		setSearchText(value);
		selectedFromDropdownRef.current = false;
	}, []);

	const handleSelect = useCallback(
		(value: string): void => {
			selectedFromDropdownRef.current = true;
			const resolved = resolveMetricFromText(value);
			onChange(resolved);
			if (onSelect) {
				onSelect(resolved);
			}
			setSearchText('');
		},
		[onChange, onSelect, resolveMetricFromText],
	);

	const handleBlur = useCallback(() => {
		if (selectedFromDropdownRef.current) {
			selectedFromDropdownRef.current = false;
			return;
		}

		const typedValue = inputValue?.trim() || '';
		if (typedValue && typedValue !== currentMetricName) {
			onChange(resolveMetricFromText(typedValue));
		} else if (!typedValue && currentMetricName) {
			setInputValue(currentMetricName);
		}
	}, [inputValue, currentMetricName, onChange, resolveMetricFromText]);

	useEffect(() => {
		const handleKeyDown = (e: KeyboardEvent): void => {
			if (e.key === 'Enter' && (e.metaKey || e.ctrlKey)) {
				const typedValue = inputValue?.trim() || '';
				if (typedValue && typedValue !== currentMetricName) {
					flushSync(() => {
						onChange(resolveMetricFromText(typedValue));
					});
				}
			}
		};

		document.addEventListener('keydown', handleKeyDown, true);
		return (): void => {
			document.removeEventListener('keydown', handleKeyDown, true);
		};
	}, [inputValue, currentMetricName, onChange, resolveMetricFromText]);

	return (
		<AutoComplete
			className="metric-name-selector"
			getPopupContainer={popupContainer}
			style={selectStyle}
			filterOption={false}
			placeholder={placeholder}
			onSearch={handleSearch}
			onChange={handleChange}
			notFoundContent={
				isFetching ? (
					<Spin size="small" />
				) : isError ? (
					<Typography.Text type="danger" style={{ fontSize: 12 }}>
						Failed to load metrics
					</Typography.Text>
				) : null
			}
			options={optionsData}
			value={inputValue}
			onBlur={handleBlur}
			onSelect={handleSelect}
			disabled={disabled}
		/>
	);
});
@@ -0,0 +1,2 @@
export type { MetricNameSelectorProps } from './MetricNameSelector';
export { MetricNameSelector } from './MetricNameSelector';
@@ -2,6 +2,7 @@ export { AggregatorFilter } from './AggregatorFilter';
export { BuilderUnitsFilter } from './BuilderUnitsFilter';
export { GroupByFilter } from './GroupByFilter';
export { HavingFilter } from './HavingFilter';
export { MetricNameSelector } from './MetricNameSelector';
export { OperatorsSelect } from './OperatorsSelect';
export { OrderByFilter } from './OrderByFilter';
export { ReduceToFilter } from './ReduceToFilter';
@@ -257,7 +257,9 @@ function TimeSeriesView({
				chartData[0]?.length === 0 &&
				!isLoading &&
				!isError &&
				dataSource === DataSource.METRICS && <EmptyMetricsSearch />}
				dataSource === DataSource.METRICS && (
					<EmptyMetricsSearch hasQueryResult={data !== undefined} />
				)}

			{!isLoading &&
				!isError &&
@@ -248,19 +248,12 @@ export const useQueryOperations: UseQueryOperations = ({
	);

	const handleChangeAggregatorAttribute = useCallback(
		(
			value: BaseAutocompleteData,
			isEditMode?: boolean,
			attributeKeys?: BaseAutocompleteData[],
		): void => {
		(value: BaseAutocompleteData, isEditMode?: boolean): void => {
			const newQuery: IBuilderQuery = {
				...query,
				aggregateAttribute: value,
			};

			const getAttributeKeyFromMetricName = (metricName: string): string =>
				attributeKeys?.find((key) => key.key === metricName)?.type || '';

			if (
				newQuery.dataSource === DataSource.METRICS &&
				entityVersion === ENTITY_VERSION_V4
@@ -311,9 +304,7 @@ export const useQueryOperations: UseQueryOperations = ({
				// Get current metric info
				const currentMetricType = newQuery.aggregateAttribute?.type || '';

				const prevMetricType = previousMetricInfo?.type
					? previousMetricInfo.type
					: getAttributeKeyFromMetricName(previousMetricInfo?.name || '');
				const prevMetricType = previousMetricInfo?.type || '';

				// Check if metric type has changed by comparing with tracked previous values
				const metricTypeChanged =
@@ -374,7 +365,7 @@ export const useQueryOperations: UseQueryOperations = ({

				// Handle a query with an unknown metric to avoid 400 and 500 errors
				// When a metric value is typed but not available: time - 'avg', space - 'avg'
				// If not typed - time - 'rate', space - 'sum', op - 'count'
				// If not typed - time - 'avg', space - 'sum'
				if (isEmpty(newQuery.aggregateAttribute?.type)) {
					if (!isEmpty(newQuery.aggregateAttribute?.key)) {
						newQuery.aggregations = [
@@ -388,7 +379,7 @@ export const useQueryOperations: UseQueryOperations = ({
					} else {
						newQuery.aggregations = [
							{
								timeAggregation: MetricAggregateOperator.COUNT,
								timeAggregation: MetricAggregateOperator.AVG,
								metricName: newQuery.aggregateAttribute?.key || '',
								temporality: '',
								spaceAggregation: MetricAggregateOperator.SUM,
@@ -408,6 +399,29 @@ export const useQueryOperations: UseQueryOperations = ({
						];
					}
				}

				// Override with safe defaults when metric type is unknown to avoid 400/500 errors
				if (isEmpty(newQuery.aggregateAttribute?.type)) {
					if (!isEmpty(newQuery.aggregateAttribute?.key)) {
						newQuery.aggregations = [
							{
								timeAggregation: MetricAggregateOperator.AVG,
								metricName: newQuery.aggregateAttribute?.key || '',
								temporality: '',
								spaceAggregation: MetricAggregateOperator.AVG,
							},
						];
					} else {
						newQuery.aggregations = [
							{
								timeAggregation: MetricAggregateOperator.AVG,
								metricName: newQuery.aggregateAttribute?.key || '',
								temporality: '',
								spaceAggregation: MetricAggregateOperator.SUM,
							},
						];
					}
				}
			}
		}
@@ -54,7 +54,7 @@ export const stepIntervalUnchanged = {
		{
			metricName: '',
			temporality: '',
			timeAggregation: 'count',
			timeAggregation: 'avg',
			spaceAggregation: 'sum',
			reduceTo: ReduceOperators.AVG,
		},
@@ -177,7 +177,7 @@ export const replaceVariables = {
		{
			metricName: '',
			temporality: '',
			timeAggregation: 'count',
			timeAggregation: 'avg',
			spaceAggregation: 'sum',
			reduceTo: ReduceOperators.AVG,
		},
@@ -267,7 +267,7 @@ export const defaultOutput = {
			reduceTo: ReduceOperators.AVG,
			spaceAggregation: 'sum',
			temporality: '',
			timeAggregation: 'count',
			timeAggregation: 'avg',
		},
	],
	filter: { expression: '' },
@@ -392,7 +392,7 @@ export const outputWithFunctions = {
		{
			metricName: '',
			temporality: '',
			timeAggregation: 'count',
			timeAggregation: 'avg',
			spaceAggregation: 'sum',
			reduceTo: ReduceOperators.AVG,
		},
@@ -429,7 +429,7 @@ export const outputWithFunctions = {
		{
			metricName: '',
			temporality: '',
			timeAggregation: 'count',
			timeAggregation: 'avg',
			spaceAggregation: 'sum',
			reduceTo: ReduceOperators.AVG,
		},

@@ -72,7 +72,6 @@ export type UseQueryOperations (
	handleChangeAggregatorAttribute: (
		value: BaseAutocompleteData,
		isEditMode?: boolean,
		attributeKeys?: BaseAutocompleteData[],
	) => void;
	handleChangeDataSource: (newSource: DataSource) => void;
	handleDeleteQuery: () => void;
@@ -6205,10 +6205,10 @@
    "@types/history" "^4.7.11"
    "@types/react" "*"

"@types/react-syntax-highlighter@15.5.7":
  version "15.5.7"
  resolved "https://registry.yarnpkg.com/@types/react-syntax-highlighter/-/react-syntax-highlighter-15.5.7.tgz#bd29020ccb118543d88779848f99059b64b02d0f"
  integrity sha512-bo5fEO5toQeyCp0zVHBeggclqf5SQ/Z5blfFmjwO5dkMVGPgmiwZsJh9nu/Bo5L7IHTuGWrja6LxJVE2uB5ZrQ==
"@types/react-syntax-highlighter@15.5.13":
  version "15.5.13"
  resolved "https://registry.yarnpkg.com/@types/react-syntax-highlighter/-/react-syntax-highlighter-15.5.13.tgz#c5baf62a3219b3bf28d39cfea55d0a49a263d1f2"
  integrity sha512-uLGJ87j6Sz8UaBAooU0T6lWJ0dBmjZgN1PZTrj05TNql2/XpC6+4HhMT5syIdFUUt+FASfCeLLv4kBygNU+8qA==
  dependencies:
    "@types/react" "*"
@@ -18,7 +18,6 @@ import (
	"github.com/SigNoz/signoz/pkg/modules/organization"
	"github.com/SigNoz/signoz/pkg/modules/preference"
	"github.com/SigNoz/signoz/pkg/modules/promote"
	"github.com/SigNoz/signoz/pkg/modules/rawdataexport"
	"github.com/SigNoz/signoz/pkg/modules/session"
	"github.com/SigNoz/signoz/pkg/modules/user"
	"github.com/SigNoz/signoz/pkg/querier"
@@ -47,7 +46,6 @@ type provider struct {
	gatewayHandler gateway.Handler
	fieldsHandler fields.Handler
	authzHandler authz.Handler
	rawDataExportHandler rawdataexport.Handler
	zeusHandler zeus.Handler
	querierHandler querier.Handler
}
@@ -69,7 +67,6 @@ func NewFactory(
	gatewayHandler gateway.Handler,
	fieldsHandler fields.Handler,
	authzHandler authz.Handler,
	rawDataExportHandler rawdataexport.Handler,
	zeusHandler zeus.Handler,
	querierHandler querier.Handler,
) factory.ProviderFactory[apiserver.APIServer, apiserver.Config] {
@@ -94,7 +91,6 @@ func NewFactory(
			gatewayHandler,
			fieldsHandler,
			authzHandler,
			rawDataExportHandler,
			zeusHandler,
			querierHandler,
		)
@@ -121,7 +117,6 @@ func newProvider(
	gatewayHandler gateway.Handler,
	fieldsHandler fields.Handler,
	authzHandler authz.Handler,
	rawDataExportHandler rawdataexport.Handler,
	zeusHandler zeus.Handler,
	querierHandler querier.Handler,
) (apiserver.APIServer, error) {
@@ -146,7 +141,6 @@ func newProvider(
		gatewayHandler: gatewayHandler,
		fieldsHandler: fieldsHandler,
		authzHandler: authzHandler,
		rawDataExportHandler: rawDataExportHandler,
		zeusHandler: zeusHandler,
		querierHandler: querierHandler,
	}
@@ -221,10 +215,6 @@ func (provider *provider) AddToRouter(router *mux.Router) error {
		return err
	}

	if err := provider.addRawDataExportRoutes(router); err != nil {
		return err
	}

	if err := provider.addZeusRoutes(router); err != nil {
		return err
	}
@@ -1,47 +0,0 @@
package signozapiserver

import (
	"net/http"

	"github.com/SigNoz/signoz/pkg/http/handler"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/exporttypes"
	v5 "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/gorilla/mux"
)

func (provider *provider) addRawDataExportRoutes(router *mux.Router) error {
	if err := router.Handle("/api/v1/export_raw_data", handler.New(provider.authZ.ViewAccess(provider.rawDataExportHandler.ExportRawData), handler.OpenAPIDef{
		ID: "HandleExportRawDataGET",
		Tags: []string{"logs", "traces"},
		Summary: "Export raw data",
		Description: "This endpoints allows simple query exporting raw data for traces and logs",
		RequestQuery: new(exporttypes.ExportRawDataQueryParams),
		RequestContentType: "",
		Response: nil,
		ResponseContentType: "application/json",
		SuccessStatusCode: http.StatusOK,
		ErrorStatusCodes: []int{http.StatusBadRequest},
		SecuritySchemes: newSecuritySchemes(types.RoleViewer),
	})).Methods(http.MethodGet).GetError(); err != nil {
		return err
	}
	if err := router.Handle("/api/v1/export_raw_data", handler.New(provider.authZ.ViewAccess(provider.rawDataExportHandler.ExportRawData), handler.OpenAPIDef{
		ID: "HandleExportRawDataPOST",
		Tags: []string{"logs", "traces"},
		Summary: "Export raw data",
		Description: "This endpoints allows complex query exporting raw data for traces and logs",
		Request: new(v5.QueryRangeRequest),
		RequestQuery: new(exporttypes.ExportRawDataFormatQueryParam),
		RequestContentType: "application/json",
		Response: nil,
		ResponseContentType: "application/json",
		SuccessStatusCode: http.StatusOK,
		ErrorStatusCodes: []int{http.StatusBadRequest},
		SecuritySchemes: newSecuritySchemes(types.RoleViewer),
	})).Methods(http.MethodPost).GetError(); err != nil {
		return err
	}

	return nil
}
@@ -14,6 +14,7 @@ import (
	"github.com/SigNoz/signoz/pkg/modules/dashboard"
	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
	"github.com/SigNoz/signoz/pkg/querybuilder"
	"github.com/SigNoz/signoz/pkg/telemetrymeter"
	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/types/ctxtypes"
@@ -56,11 +57,81 @@ func NewModule(ts telemetrystore.TelemetryStore, telemetryMetadataStore telemetr
	}
}

// TODO(srikanthccv): use metadata store to fetch metric metadata
func (m *module) ListMetrics(ctx context.Context, orgID valuer.UUID, params *metricsexplorertypes.ListMetricsParams) (*metricsexplorertypes.ListMetricsResponse, error) {
	if err := params.Validate(); err != nil {
		return nil, err
	}

	if params.Source == "meter" {
		return m.listMeterMetrics(ctx, params)
	}
	return m.listMetrics(ctx, orgID, params)
}

func (m *module) listMeterMetrics(ctx context.Context, params *metricsexplorertypes.ListMetricsParams) (*metricsexplorertypes.ListMetricsResponse, error) {
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select(
		"metric_name",
		"any(description) AS description",
		"any(type) AS metric_type",
		"any(unit) AS metric_unit",
		"argMax(temporality, unix_milli) AS temporality",
		"any(is_monotonic) AS is_monotonic",
	)
	sb.From(fmt.Sprintf("%s.%s", telemetrymeter.DBName, telemetrymeter.SamplesTableName))

	if params.Start != nil && params.End != nil {
		sb.Where(sb.Between("unix_milli", *params.Start, *params.End))
	}

	if params.Search != "" {
		searchLower := strings.ToLower(params.Search)
		searchLower = strings.ReplaceAll(searchLower, "%", "\\%")
		searchLower = strings.ReplaceAll(searchLower, "_", "\\_")
		sb.Where(sb.Like("lower(metric_name)", fmt.Sprintf("%%%s%%", searchLower)))
	}

	sb.GroupBy("metric_name")
	sb.OrderBy("metric_name ASC")
	sb.Limit(params.Limit)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	valueCtx := ctxtypes.SetClickhouseMaxThreads(ctx, m.config.TelemetryStore.Threads)
	db := m.telemetryStore.ClickhouseDB()
	rows, err := db.Query(valueCtx, query, args...)
	if err != nil {
		return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to list meter metrics")
	}
	defer rows.Close()

	metrics := make([]metricsexplorertypes.ListMetric, 0)
	for rows.Next() {
		var metric metricsexplorertypes.ListMetric
		if err := rows.Scan(
			&metric.MetricName,
			&metric.Description,
			&metric.MetricType,
			&metric.MetricUnit,
			&metric.Temporality,
			&metric.IsMonotonic,
		); err != nil {
			return nil, errors.WrapInternalf(err, errors.CodeInternal, "failed to scan meter metric")
		}
		metrics = append(metrics, metric)
	}

	if err := rows.Err(); err != nil {
		return nil, errors.WrapInternalf(err, errors.CodeInternal, "error iterating meter metrics")
	}

	return &metricsexplorertypes.ListMetricsResponse{
		Metrics: metrics,
	}, nil
}

func (m *module) listMetrics(ctx context.Context, orgID valuer.UUID, params *metricsexplorertypes.ListMetricsParams) (*metricsexplorertypes.ListMetricsResponse, error) {
	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("DISTINCT metric_name")
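The search branch above lowercases the term and escapes the SQL LIKE wildcards `%` and `_` before wrapping it in `%...%`, so those characters match literally instead of acting as wildcards. A minimal standalone sketch of the same escaping (the helper name is illustrative, not part of the module):

package main

import (
	"fmt"
	"strings"
)

// escapeLikeTerm mirrors the escaping used in listMeterMetrics: lowercase the
// search term and escape the LIKE metacharacters so they match literally.
func escapeLikeTerm(search string) string {
	s := strings.ToLower(search)
	s = strings.ReplaceAll(s, "%", "\\%")
	s = strings.ReplaceAll(s, "_", "\\_")
	return fmt.Sprintf("%%%s%%", s) // wrap for a contains-style match
}

func main() {
	// Prints %http\_server%, so '_' matches a literal underscore.
	fmt.Println(escapeLikeTerm("HTTP_Server"))
}

Without the escaping, a search for http_server would also match httpxserver, since `_` matches any single character in LIKE.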
@@ -6,18 +6,18 @@ import (
	"fmt"
	"io"
	"net/http"
	"slices"
	"net/url"
	"strconv"
	"strings"
	"time"
	"unicode"
	"unicode/utf8"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/http/binding"
	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/modules/rawdataexport"
	"github.com/SigNoz/signoz/pkg/telemetrylogs"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/exporttypes"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
@@ -31,52 +31,129 @@ func NewHandler(module rawdataexport.Module) rawdataexport.Handler {
	return &handler{module: module}
}

// ExportRawData handles data export requests.
//
// API Documentation:
// Endpoint: GET /api/v1/export_raw_data
//
// Query Parameters:
//
//   - source (optional): Type of data to export ["logs" (default), "metrics", "traces"]
//     Note: Currently only "logs" is fully supported
//
//   - format (optional): Output format ["csv" (default), "jsonl"]
//
//   - start (required): Start time for query (Unix timestamp in nanoseconds)
//
//   - end (required): End time for query (Unix timestamp in nanoseconds)
//
//   - limit (optional): Maximum number of rows to export
//     Constraints: Must be positive and cannot exceed MAX_EXPORT_ROW_COUNT_LIMIT
//
//   - filter (optional): Filter expression to apply to the query
//
//   - columns (optional): Specific columns to include in export
//     Default: all columns are returned
//     Format: ["context.field:type", "context.field", "field"]
//
//   - order_by (optional): Sorting specification ["column:direction" or "context.field:type:direction"]
//     Direction: "asc" or "desc"
//     Default: ["timestamp:desc", "id:desc"]
//
// Response Headers:
//   - Content-Type: "text/csv" or "application/x-ndjson"
//   - Content-Encoding: "gzip" (handled by HTTP middleware)
//   - Content-Disposition: "attachment; filename=\"data_exported.[format]\""
//   - Cache-Control: "no-cache"
//   - Vary: "Accept-Encoding"
//   - Transfer-Encoding: "chunked"
//   - Trailer: X-Response-Complete
//
// Response Format:
//
//	CSV: Headers in first row, data in subsequent rows
//	JSONL: One JSON object per line
//
// Example Usage:
//
//	Basic CSV export:
//	GET /api/v1/export_raw_data?start=1693612800000000000&end=1693699199000000000
//
//	Export with columns and format:
//	GET /api/v1/export_raw_data?start=1693612800000000000&end=1693699199000000000&format=jsonl
//	&columns=timestamp&columns=severity&columns=message
//
//	Export with filter and ordering:
//	GET /api/v1/export_raw_data?start=1693612800000000000&end=1693699199000000000
//	&filter=severity="error"&order_by=timestamp:desc&limit=1000
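For reference, a minimal client-side sketch of assembling the documented GET request with Go's standard net/url package; the host is a placeholder and the parameter values are examples:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Query parameters follow the API documentation above; values are examples.
	q := url.Values{}
	q.Set("start", "1693612800000000000") // Unix nanoseconds
	q.Set("end", "1693699199000000000")
	q.Set("format", "jsonl")
	q.Set("limit", "1000")
	q.Set("order_by", "timestamp:desc")
	q.Add("columns", "timestamp") // repeatable parameter
	q.Add("columns", "severity")
	q.Add("columns", "message")

	u := url.URL{
		Scheme:   "https",
		Host:     "signoz.example.com", // placeholder host
		Path:     "/api/v1/export_raw_data",
		RawQuery: q.Encode(),
	}
	fmt.Println(u.String())
}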
func (handler *handler) ExportRawData(rw http.ResponseWriter, r *http.Request) {
	var queryRangeRequest qbtypes.QueryRangeRequest
	var format string
	source, err := getExportQuerySource(r.URL.Query())
	if err != nil {
		render.Error(rw, err)
		return
	}

	switch r.Method {
	case http.MethodGet:
		var params exporttypes.ExportRawDataQueryParams
		if err := binding.Query.BindQuery(r.URL.Query(), &params); err != nil {
			render.Error(rw, err)
			return
		}
		params.Normalize()
		if err := params.Validate(); err != nil {
			render.Error(rw, err)
			return
		}
		format = params.Format
		queryRangeRequest = buildQueryRangeRequest(&params)
	case http.MethodPost:
		var formatParam exporttypes.ExportRawDataFormatQueryParam
		if err := binding.Query.BindQuery(r.URL.Query(), &formatParam); err != nil {
			render.Error(rw, err)
			return
		}
		format = formatParam.Format
		if err := json.NewDecoder(r.Body).Decode(&queryRangeRequest); err != nil {
			render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid request body: %v", err))
			return
		}
	switch source {
	case "logs":
		handler.exportLogs(rw, r)
	case "traces":
		handler.exportTraces(rw, r)
	case "metrics":
		handler.exportMetrics(rw, r)
	default:
		render.Error(rw, errors.Newf(errors.TypeMethodNotAllowed, errors.CodeMethodNotAllowed, "method not allowed, only GET/POST supported"))
		return
		render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid source: must be logs"))
	}
}

	if err := validateSpecForExport(&queryRangeRequest); err != nil {
func (handler *handler) exportMetrics(rw http.ResponseWriter, r *http.Request) {
	render.Error(rw, errors.Newf(errors.TypeUnsupported, errors.CodeUnsupported, "metrics export is not yet supported"))
}

func (handler *handler) exportTraces(rw http.ResponseWriter, r *http.Request) {
	render.Error(rw, errors.Newf(errors.TypeUnsupported, errors.CodeUnsupported, "traces export is not yet supported"))
}

func (handler *handler) exportLogs(rw http.ResponseWriter, r *http.Request) {
	// Set up response headers
	rw.Header().Set("Cache-Control", "no-cache")
	rw.Header().Set("Vary", "Accept-Encoding") // Indicate that response varies based on Accept-Encoding
	rw.Header().Set("Access-Control-Expose-Headers", "Content-Disposition, X-Response-Complete")
	rw.Header().Set("Trailer", "X-Response-Complete")
	rw.Header().Set("Transfer-Encoding", "chunked")

	queryParams := r.URL.Query()

	startTime, endTime, err := getExportQueryTimeRange(queryParams)
	if err != nil {
		render.Error(rw, err)
		return
	}

	if err := validateAndApplyDefaultExportLimits(queryRangeRequest.CompositeQuery.Queries); err != nil {
	limit, err := getExportQueryLimit(queryParams)
	if err != nil {
		render.Error(rw, err)
		return
	}

	// Use default OrderBy if not specified
	queryRangeRequest.UseDefaultOrderBy()
	format, err := getExportQueryFormat(queryParams)
	if err != nil {
		render.Error(rw, err)
		return
	}

	// Set appropriate content type and filename
	filename := fmt.Sprintf("data_exported_%s.%s", time.Now().Format("2006-01-02_150405"), format)
	rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))

	filterExpression := queryParams.Get("filter")

	orderByExpression, err := getExportQueryOrderBy(queryParams)
	if err != nil {
		render.Error(rw, err)
		return
	}

	columns := getExportQueryColumns(queryParams)

	claims, err := authtypes.ClaimsFromContext(r.Context())
	if err != nil {
@@ -90,134 +167,70 @@ func (handler *handler) ExportRawData(rw http.ResponseWriter, r *http.Request) {
		return
	}

	setExportResponseHeaders(rw, format)
	queryRangeRequest := qbtypes.QueryRangeRequest{
		Start: startTime,
		End: endTime,
		RequestType: qbtypes.RequestTypeRaw,
		CompositeQuery: qbtypes.CompositeQuery{
			Queries: []qbtypes.QueryEnvelope{
				{
					Type: qbtypes.QueryTypeBuilder,
					Spec: nil,
				},
			},
		},
	}

	spec := qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
		Signal: telemetrytypes.SignalLogs,
		Name: "raw",
		Filter: &qbtypes.Filter{
			Expression: filterExpression,
		},
		Limit: limit,
		Order: orderByExpression,
	}

	spec.SelectFields = columns

	queryRangeRequest.CompositeQuery.Queries[0].Spec = spec

	// This will signal the Export module to stop sending data
	doneChan := make(chan any)
	defer close(doneChan)
	rowChan, errChan := handler.module.ExportRawData(r.Context(), orgID, &queryRangeRequest, doneChan)

	isComplete, err := handler.executeExport(rowChan, errChan, format, rw)
	if err != nil {
		render.Error(rw, err)
		return
	}
	rw.Header().Set("X-Response-Complete", strconv.FormatBool(isComplete))
}
	var isComplete bool

// validateSpecForExport validates query specs
func validateSpecForExport(req *qbtypes.QueryRangeRequest) error {

	queries := req.CompositeQuery.Queries

	// If the trace operator query is not present and there are multiple queries, return an error
	if req.TraceOperatorQueryIndex() == -1 && len(queries) > 1 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "multiple queries not allowed without a trace operator query")
	}

	for idx := range queries {
		switch spec := queries[idx].Spec.(type) {
		case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation],
			qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation],
			qbtypes.QueryBuilderTraceOperator:
			// Supported spec types
		default:
			return errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported query at index %d type: %T", idx, spec)
		}
	}

	err := req.Validate(qbtypes.WithSkipLimitValidation())
	if err != nil {
		return err
	}

	return nil
}

func validateAndApplyDefaultExportLimits(queries []qbtypes.QueryEnvelope) error {
	for idx := range queries {
		limit := queries[idx].GetLimit()
		if limit == 0 {
			limit = DefaultExportRowCountLimit
		} else if limit < 0 {
			return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be positive")
		} else if limit > MaxExportRowCountLimit {
			return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit cannot be more than %d", MaxExportRowCountLimit)
		}
		queries[idx].SetLimit(limit)
	}
	return nil
}

// buildQueryEnvelope creates a QueryEnvelope with a QueryBuilderQuery for the given aggregation type.
func buildQueryEnvelope[T any](signal telemetrytypes.Signal, filter *qbtypes.Filter, limit int, order []qbtypes.OrderBy, selectFields []telemetrytypes.TelemetryFieldKey) qbtypes.QueryEnvelope {
	return qbtypes.QueryEnvelope{
		Type: qbtypes.QueryTypeBuilder,
		Spec: qbtypes.QueryBuilderQuery[T]{
			Signal: signal,
			Filter: filter,
			Limit: limit,
			Order: order,
			SelectFields: selectFields,
		},
	}
}

// buildQueryRangeRequest builds a QueryRangeRequest from already-bound and validated GET query params.
func buildQueryRangeRequest(params *exporttypes.ExportRawDataQueryParams) qbtypes.QueryRangeRequest {
	var query qbtypes.QueryEnvelope
	switch params.Signal {
	case telemetrytypes.SignalLogs:
		query = buildQueryEnvelope[qbtypes.LogAggregation](params.Signal, &params.Filter, params.Limit, params.Order, params.SelectFields)
	case telemetrytypes.SignalTraces:
		query = buildQueryEnvelope[qbtypes.TraceAggregation](params.Signal, &params.Filter, params.Limit, params.Order, params.SelectFields)
	}

	return qbtypes.QueryRangeRequest{
		Start: params.Start,
		End: params.End,
		RequestType: qbtypes.RequestTypeRaw,
		CompositeQuery: qbtypes.CompositeQuery{
			Queries: []qbtypes.QueryEnvelope{query},
		},
	}
}

// setExportResponseHeaders sets common HTTP headers for export responses.
func setExportResponseHeaders(rw http.ResponseWriter, format string) {
	rw.Header().Set("Cache-Control", "no-cache")
	rw.Header().Set("Vary", "Accept-Encoding")
	rw.Header().Set("Access-Control-Expose-Headers", "Content-Disposition, X-Response-Complete")
	rw.Header().Set("Trailer", "X-Response-Complete")
	rw.Header().Set("Transfer-Encoding", "chunked")
	filename := fmt.Sprintf("data_exported_%s.%s", time.Now().Format("2006-01-02_150405"), format)
	rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
}

// executeExport streams data from rowChan to the response writer in the specified format.
func (handler *handler) executeExport(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, format string, rw http.ResponseWriter) (bool, error) {
	switch format {
	case "csv", "":
		rw.Header().Set("Content-Type", "text/csv")
		csvWriter := csv.NewWriter(rw)
		isComplete, err := handler.exportRawDataCSV(rowChan, errChan, csvWriter)
		isComplete, err = handler.exportLogsCSV(rowChan, errChan, csvWriter)
		if err != nil {
			return false, err
			render.Error(rw, err)
			return
		}
		csvWriter.Flush()
		return isComplete, nil
	case "jsonl":
		rw.Header().Set("Content-Type", "application/x-ndjson")
		return handler.exportRawDataJSONL(rowChan, errChan, rw)
		isComplete, err = handler.exportLogsJSONL(rowChan, errChan, rw)
		if err != nil {
			render.Error(rw, err)
			return
		}
	default:
		return false, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid format: must be csv or jsonl")
		render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid format: must be csv or jsonl"))
		return
	}

	rw.Header().Set("X-Response-Complete", strconv.FormatBool(isComplete))
}

// exportRawDataCSV is a generic CSV export function that works with any raw data (logs, traces, etc.)
func (handler *handler) exportRawDataCSV(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, csvWriter *csv.Writer) (bool, error) {

func (handler *handler) exportLogsCSV(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, csvWriter *csv.Writer) (bool, error) {
	var header []string
	headerToIndexMapping := make(map[string]int)

	headerToIndexMapping := make(map[string]int, len(header))

	totalBytes := uint64(0)
	for {
@@ -255,8 +268,8 @@ func (handler *handler) exportRawDataCSV(rowChan <-chan *qbtypes.RawRow, errChan
		}
	}
}

// exportRawDataJSONL is a generic JSONL export function that works with any raw data (logs, traces, etc.)
func (handler *handler) exportRawDataJSONL(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, writer io.Writer) (bool, error) {
func (handler *handler) exportLogsJSONL(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, writer io.Writer) (bool, error) {

	totalBytes := uint64(0)
	for {
		select {
@@ -264,11 +277,9 @@ func (handler *handler) exportRawDataJSONL(rowChan <-chan *qbtypes.RawRow, errCh
			if !ok {
				return true, nil
			}
			jsonBytes, err := json.Marshal(row.Data)
			if err != nil {
				return false, errors.NewUnexpectedf(errors.CodeInternal, "error marshaling JSON: %s", err)
			}
			totalBytes += uint64(len(jsonBytes)) + 1
			// Handle JSON format (JSONL - one object per line)
			jsonBytes, _ := json.Marshal(row.Data)
			totalBytes += uint64(len(jsonBytes)) + 1 // +1 for newline

			if _, err := writer.Write(jsonBytes); err != nil {
				return false, errors.NewUnexpectedf(errors.CodeInternal, "error writing JSON: %s", err)
@@ -288,33 +299,74 @@ func (handler *handler) exportRawDataJSONL(rowChan <-chan *qbtypes.RawRow, errCh
	}
}

// priorityColumns defines the columns that should appear first in the CSV output, in order.
var priorityColumns = []string{"timestamp", "id"}
func getExportQuerySource(queryParams url.Values) (string, error) {
	switch queryParams.Get("source") {
	case "logs", "":
		return "logs", nil
	case "metrics":
		return "metrics", errors.NewInvalidInputf(errors.CodeInvalidInput, "metrics export not yet supported")
	case "traces":
		return "traces", errors.NewInvalidInputf(errors.CodeInvalidInput, "traces export not yet supported")
	default:
		return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid source: must be logs, metrics or traces")
	}
}

func getExportQueryFormat(queryParams url.Values) (string, error) {
	switch queryParams.Get("format") {
	case "csv", "":
		return "csv", nil
	case "jsonl":
		return "jsonl", nil
	default:
		return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid format: must be csv or jsonl")
	}
}

func getExportQueryLimit(queryParams url.Values) (int, error) {

	limitStr := queryParams.Get("limit")
	if limitStr == "" {
		return DefaultExportRowCountLimit, nil
	} else {
		limit, err := strconv.Atoi(limitStr)
		if err != nil {
			return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid limit format: %s", err.Error())
		}
		if limit <= 0 {
			return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be positive")
		}
		if limit > MaxExportRowCountLimit {
			return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "limit cannot be more than %d", MaxExportRowCountLimit)
		}
		return limit, nil
	}
}

func getExportQueryTimeRange(queryParams url.Values) (uint64, uint64, error) {

	startTimeStr := queryParams.Get("start")
	endTimeStr := queryParams.Get("end")

	if startTimeStr == "" || endTimeStr == "" {
		return 0, 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "start and end time are required")
	}
	startTime, err := strconv.ParseUint(startTimeStr, 10, 64)
	if err != nil {
		return 0, 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid start time format: %s", err.Error())
	}
	endTime, err := strconv.ParseUint(endTimeStr, 10, 64)
	if err != nil {
		return 0, 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid end time format: %s", err.Error())
	}
	return startTime, endTime, nil
}

func constructCSVHeaderFromQueryResponse(data map[string]any) []string {
	header := make([]string, 0, len(data))
	for key := range data {
		header = append(header, key)
	}
	// This is to ensure CSV output is consistent across multiple queries
	slices.SortFunc(header, func(a, b string) int {
		ai, bi := slices.Index(priorityColumns, a), slices.Index(priorityColumns, b)
		switch {
		case ai != -1 && bi != -1:
			return ai - bi
		case ai != -1:
			return -1
		case bi != -1:
			return 1
		default:
			if a < b {
				return -1
			} else if a > b {
				return 1
			}
			return 0
		}
	})
	return header
}

@@ -375,12 +427,9 @@ func constructCSVRecordFromQueryResponse(data map[string]any, headerToIndexMappi
			valueStr = v.String()

		default:
			jsonBytes, err := json.Marshal(v)
			if err != nil {
				valueStr = fmt.Sprintf("%v", v)
			} else {
				valueStr = string(jsonBytes)
			}
			// For all other complex types (maps, structs, etc.)
			jsonBytes, _ := json.Marshal(v)
			valueStr = string(jsonBytes)
		}

		record[index] = sanitizeForCSV(valueStr)
@@ -389,6 +438,26 @@ func constructCSVRecordFromQueryResponse(data map[string]any, headerToIndexMappi
	return record
}

// getExportQueryColumns parses the "columns" query parameters and returns a slice of TelemetryFieldKey structs.
// Each column should be a valid telemetry field key in the format "context.field:type" or "context.field" or "field"
func getExportQueryColumns(queryParams url.Values) []telemetrytypes.TelemetryFieldKey {
	columnParams := queryParams["columns"]

	columns := make([]telemetrytypes.TelemetryFieldKey, 0, len(columnParams))

	for _, columnStr := range columnParams {
		// Skip empty strings
		columnStr = strings.TrimSpace(columnStr)
		if columnStr == "" {
			continue
		}

		columns = append(columns, telemetrytypes.GetFieldKeyFromKeyText(columnStr))
	}

	return columns
}

func getsizeOfStringSlice(slice []string) uint64 {
	var totalBytes uint64
	for _, str := range slice {
@@ -396,3 +465,52 @@ func getsizeOfStringSlice(slice []string) uint64 {
	}
	return totalBytes
}

// getExportQueryOrderBy parses the "order_by" query parameters and returns a slice of OrderBy structs.
// Each "order_by" parameter should be in the format "column:direction"
// Each "column" should be a valid telemetry field key in the format "context.field:type" or "context.field" or "field"
func getExportQueryOrderBy(queryParams url.Values) ([]qbtypes.OrderBy, error) {
	orderByParam := queryParams.Get("order_by")

	orderByParam = strings.TrimSpace(orderByParam)
	if orderByParam == "" {
		return telemetrylogs.DefaultLogsV2SortingOrder, nil
	}

	parts := strings.Split(orderByParam, ":")
	if len(parts) != 2 && len(parts) != 3 {
		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order_by format: %s, should be <column>:<direction>", orderByParam)
	}

	column := strings.Join(parts[:len(parts)-1], ":")
	direction := parts[len(parts)-1]

	orderDirection, ok := qbtypes.OrderDirectionMap[direction]
	if !ok {
		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order_by direction: %s, should be one of %s, %s", direction, qbtypes.OrderDirectionAsc, qbtypes.OrderDirectionDesc)
	}

	orderByKey := telemetrytypes.GetFieldKeyFromKeyText(column)

	orderBy := []qbtypes.OrderBy{
		{
			Key: qbtypes.OrderByKey{
				TelemetryFieldKey: orderByKey,
			},
			Direction: orderDirection,
		},
	}

	// If we are ordering by the timestamp column, also order by the ID column
	if orderByKey.Name == telemetrylogs.LogsV2TimestampColumn {
		orderBy = append(orderBy, qbtypes.OrderBy{
			Key: qbtypes.OrderByKey{
				TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
					Name: telemetrylogs.LogsV2IDColumn,
				},
			},
			Direction: orderDirection,
		})
	}
	return orderBy, nil
}
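getExportQueryOrderBy treats the last colon-separated token as the direction and joins everything before it back into the column key, which is why both "timestamp:desc" and "attribute.status:string:desc" parse cleanly; the same "context.field:type" key syntax is shared with the columns parameter via telemetrytypes.GetFieldKeyFromKeyText. A self-contained sketch of just the split logic (the helper name is illustrative, not part of the package):

package main

import (
	"fmt"
	"strings"
)

// splitOrderBy separates an order_by value into its field key and direction,
// mirroring the column/direction handling in getExportQueryOrderBy.
func splitOrderBy(param string) (column, direction string, err error) {
	parts := strings.Split(param, ":")
	if len(parts) != 2 && len(parts) != 3 {
		return "", "", fmt.Errorf("invalid order_by format: %s", param)
	}
	// The direction is the last token; the rest (possibly containing a colon
	// for "context.field:type") is rejoined as the column key.
	return strings.Join(parts[:len(parts)-1], ":"), parts[len(parts)-1], nil
}

func main() {
	for _, p := range []string{"timestamp:desc", "attribute.status:string:asc"} {
		col, dir, _ := splitOrderBy(p)
		fmt.Printf("%q -> column=%q direction=%q\n", p, col, dir)
	}
}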
@@ -2,85 +2,58 @@ package implrawdataexport
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/http/binding"
|
||||
"github.com/SigNoz/signoz/pkg/types/exporttypes"
|
||||
"github.com/SigNoz/signoz/pkg/telemetrylogs"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestExportRawDataQueryParams_BindingDefaults(t *testing.T) {
|
||||
var params exporttypes.ExportRawDataQueryParams
|
||||
err := binding.Query.BindQuery(url.Values{}, ¶ms)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "csv", params.Format)
|
||||
assert.Equal(t, DefaultExportRowCountLimit, params.Limit)
|
||||
}
|
||||
|
||||
func logQuery(limit int) qbtypes.QueryEnvelope {
|
||||
return qbtypes.QueryEnvelope{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{Limit: limit},
|
||||
}
|
||||
}
|
||||
|
||||
func traceQuery(limit int) qbtypes.QueryEnvelope {
|
||||
return qbtypes.QueryEnvelope{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{Limit: limit},
|
||||
}
|
||||
}
|
||||
|
||||
func traceOperatorQuery(limit int) qbtypes.QueryEnvelope {
|
||||
return qbtypes.QueryEnvelope{
|
||||
Type: qbtypes.QueryTypeTraceOperator,
|
||||
Spec: qbtypes.QueryBuilderTraceOperator{Limit: limit, Expression: "A"},
|
||||
}
|
||||
}
|
||||
|
||||
func makeRequest(queries ...qbtypes.QueryEnvelope) qbtypes.QueryRangeRequest {
|
||||
return qbtypes.QueryRangeRequest{
|
||||
Start: 1000000000000,
|
||||
End: 1000003600000,
|
||||
RequestType: qbtypes.RequestTypeRaw,
|
||||
CompositeQuery: qbtypes.CompositeQuery{Queries: queries},
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateSpecForExport(t *testing.T) {
|
||||
func TestGetExportQuerySource(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
req qbtypes.QueryRangeRequest
|
||||
expectedError bool
|
||||
name string
|
||||
queryParams url.Values
|
||||
expectedSource string
|
||||
expectedError bool
|
||||
}{
|
||||
{
|
||||
name: "single log query",
|
||||
req: makeRequest(logQuery(0)),
|
||||
name: "default logs source",
|
||||
queryParams: url.Values{},
|
||||
expectedSource: "logs",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "single trace query",
|
||||
req: makeRequest(traceQuery(0)),
|
||||
name: "explicit logs source",
|
||||
queryParams: url.Values{"source": {"logs"}},
|
||||
expectedSource: "logs",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "trace operator alone",
|
||||
req: makeRequest(traceOperatorQuery(0)),
|
||||
name: "metrics source - not supported",
|
||||
queryParams: url.Values{"source": {"metrics"}},
|
||||
expectedSource: "metrics",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "multiple queries without trace operator",
|
||||
req: makeRequest(logQuery(0), traceQuery(0)),
|
||||
expectedError: true,
|
||||
name: "traces source - not supported",
|
||||
queryParams: url.Values{"source": {"traces"}},
|
||||
expectedSource: "traces",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "unsupported query type",
|
||||
req: makeRequest(qbtypes.QueryEnvelope{Type: qbtypes.QueryTypeBuilder, Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{}}),
|
||||
expectedError: true,
|
||||
name: "invalid source",
|
||||
queryParams: url.Values{"source": {"invalid"}},
|
||||
expectedSource: "",
|
||||
expectedError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := validateSpecForExport(&tt.req)
|
||||
source, err := getExportQuerySource(tt.queryParams)
|
||||
assert.Equal(t, tt.expectedSource, source)
|
||||
if tt.expectedError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
@@ -90,69 +63,456 @@ func TestValidateSpecForExport(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAndApplyDefaultExportLimits(t *testing.T) {
|
||||
func TestGetExportQueryFormat(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
queries []qbtypes.QueryEnvelope
|
||||
expectedError bool
|
||||
checkQueries func(t *testing.T, queries []qbtypes.QueryEnvelope)
|
||||
name string
|
||||
queryParams url.Values
|
||||
expectedFormat string
|
||||
expectedError bool
|
||||
}{
|
||||
{
|
||||
name: "single log query, zero limit gets default",
|
||||
queries: makeRequest(logQuery(0)).CompositeQuery.Queries,
|
||||
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
|
||||
assert.Equal(t, DefaultExportRowCountLimit, q[0].GetLimit())
|
||||
},
|
||||
name: "default csv format",
|
||||
queryParams: url.Values{},
|
||||
expectedFormat: "csv",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "single log query, valid limit kept",
|
||||
queries: makeRequest(logQuery(1000)).CompositeQuery.Queries,
|
||||
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
|
||||
assert.Equal(t, 1000, q[0].GetLimit())
|
||||
},
|
||||
name: "explicit csv format",
|
||||
queryParams: url.Values{"format": {"csv"}},
|
||||
expectedFormat: "csv",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "single log query, max limit kept",
|
||||
queries: makeRequest(logQuery(MaxExportRowCountLimit)).CompositeQuery.Queries,
|
||||
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
|
||||
assert.Equal(t, MaxExportRowCountLimit, q[0].GetLimit())
|
||||
},
|
||||
name: "jsonl format",
|
||||
queryParams: url.Values{"format": {"jsonl"}},
|
||||
expectedFormat: "jsonl",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "single log query, limit exceeds max",
|
||||
queries: makeRequest(logQuery(MaxExportRowCountLimit + 1)).CompositeQuery.Queries,
|
||||
name: "invalid format",
|
||||
queryParams: url.Values{"format": {"xml"}},
|
||||
expectedFormat: "",
|
||||
expectedError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
format, err := getExportQueryFormat(tt.queryParams)
|
||||
assert.Equal(t, tt.expectedFormat, format)
|
||||
if tt.expectedError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}

func TestGetExportQueryLimit(t *testing.T) {
	tests := []struct {
		name          string
		queryParams   url.Values
		expectedLimit int
		expectedError bool
	}{
		{
			name:          "default limit",
			queryParams:   url.Values{},
			expectedLimit: DefaultExportRowCountLimit,
			expectedError: false,
		},
		{
			name:          "valid limit",
			queryParams:   url.Values{"limit": {"5000"}},
			expectedLimit: 5000,
			expectedError: false,
		},
		{
			name:          "maximum limit",
			queryParams:   url.Values{"limit": {strconv.Itoa(MaxExportRowCountLimit)}},
			expectedLimit: MaxExportRowCountLimit,
			expectedError: false,
		},
		{
			name:          "limit exceeds maximum",
			queryParams:   url.Values{"limit": {"100000"}},
			expectedLimit: 0,
			expectedError: true,
		},
		{
-			name:    "single log query, negative limit",
-			queries: makeRequest(logQuery(-1)).CompositeQuery.Queries,
+			name:          "invalid limit format",
+			queryParams:   url.Values{"limit": {"invalid"}},
+			expectedLimit: 0,
			expectedError: true,
		},
		{
-			name:    "single trace query, zero limit gets default",
-			queries: makeRequest(traceQuery(0)).CompositeQuery.Queries,
-			checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
-				assert.Equal(t, DefaultExportRowCountLimit, q[0].GetLimit())
+			name:          "negative limit",
+			queryParams:   url.Values{"limit": {"-100"}},
+			expectedLimit: 0,
			expectedError: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			limit, err := getExportQueryLimit(tt.queryParams)
			assert.Equal(t, tt.expectedLimit, limit)
			if tt.expectedError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
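
Similarly, the expectations above pin down getExportQueryLimit fairly tightly: fall back to DefaultExportRowCountLimit when the parameter is absent, reject unparsable values, and enforce a range of 1 to MaxExportRowCountLimit. A hedged sketch under those assumptions (the two constants are the package's own; the body is illustrative, not the actual implementation):

import (
	"fmt"
	"net/url"
	"strconv"
)

// Sketch only: parse and bounds-check the limit query parameter.
func getExportQueryLimit(queryParams url.Values) (int, error) {
	raw := queryParams.Get("limit")
	if raw == "" {
		return DefaultExportRowCountLimit, nil
	}
	limit, err := strconv.Atoi(raw)
	if err != nil {
		return 0, fmt.Errorf("invalid limit %q: %w", raw, err)
	}
	if limit < 1 || limit > MaxExportRowCountLimit {
		return 0, fmt.Errorf("limit must be between 1 and %d, got %d", MaxExportRowCountLimit, limit)
	}
	return limit, nil
}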

func TestGetExportQueryTimeRange(t *testing.T) {
	tests := []struct {
		name              string
		queryParams       url.Values
		expectedStartTime uint64
		expectedEndTime   uint64
		expectedError     bool
	}{
		{
			name: "valid time range",
			queryParams: url.Values{
				"start": {"1640995200"},
				"end":   {"1641081600"},
			},
			expectedStartTime: 1640995200,
			expectedEndTime:   1641081600,
			expectedError:     false,
		},
		{
			name:          "missing start time",
			queryParams:   url.Values{"end": {"1641081600"}},
			expectedError: true,
		},
		{
			name:          "missing end time",
			queryParams:   url.Values{"start": {"1640995200"}},
			expectedError: true,
		},
		{
			name:          "missing both times",
			queryParams:   url.Values{},
			expectedError: true,
		},
		{
			name: "invalid start time format",
			queryParams: url.Values{
				"start": {"invalid"},
				"end":   {"1641081600"},
			},
			expectedError: true,
		},
		{
			name: "invalid end time format",
			queryParams: url.Values{
				"start": {"1640995200"},
				"end":   {"invalid"},
			},
			expectedError: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			startTime, endTime, err := getExportQueryTimeRange(tt.queryParams)
			if tt.expectedError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tt.expectedStartTime, startTime)
				assert.Equal(t, tt.expectedEndTime, endTime)
			}
		})
	}
}
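
The time-range cases above imply both bounds are mandatory and parsed as unsigned integers. A plausible sketch of getExportQueryTimeRange under those assumptions (no start-before-end check, since no case exercises one; treat this as illustrative):

import (
	"fmt"
	"net/url"
	"strconv"
)

// Sketch only: require both bounds and parse them as uint64.
func getExportQueryTimeRange(queryParams url.Values) (uint64, uint64, error) {
	startRaw, endRaw := queryParams.Get("start"), queryParams.Get("end")
	if startRaw == "" || endRaw == "" {
		return 0, 0, fmt.Errorf("both start and end query parameters are required")
	}
	start, err := strconv.ParseUint(startRaw, 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid start time %q: %w", startRaw, err)
	}
	end, err := strconv.ParseUint(endRaw, 10, 64)
	if err != nil {
		return 0, 0, fmt.Errorf("invalid end time %q: %w", endRaw, err)
	}
	return start, end, nil
}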

func TestGetExportQueryColumns(t *testing.T) {
	tests := []struct {
		name            string
		queryParams     url.Values
		expectedColumns []telemetrytypes.TelemetryFieldKey
	}{
		{
			name:            "no columns specified",
			queryParams:     url.Values{},
			expectedColumns: []telemetrytypes.TelemetryFieldKey{},
		},
		{
			name: "single column",
			queryParams: url.Values{
				"columns": {"timestamp"},
			},
			expectedColumns: []telemetrytypes.TelemetryFieldKey{
				{Name: "timestamp"},
			},
		},
		{
-			name:    "trace operator alone, zero limit gets default",
-			queries: makeRequest(traceOperatorQuery(0)).CompositeQuery.Queries,
-			checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
-				assert.Equal(t, DefaultExportRowCountLimit, q[0].GetLimit())
+			name: "multiple columns",
+			queryParams: url.Values{
+				"columns": {"timestamp", "message", "level"},
			},
			expectedColumns: []telemetrytypes.TelemetryFieldKey{
				{Name: "timestamp"},
				{Name: "message"},
				{Name: "level"},
			},
		},
		{
			name: "empty column name (should be skipped)",
			queryParams: url.Values{
				"columns": {"timestamp", "", "level"},
			},
			expectedColumns: []telemetrytypes.TelemetryFieldKey{
				{Name: "timestamp"},
				{Name: "level"},
			},
		},
		{
			name: "whitespace column name (should be skipped)",
			queryParams: url.Values{
				"columns": {"timestamp", " ", "level"},
			},
			expectedColumns: []telemetrytypes.TelemetryFieldKey{
				{Name: "timestamp"},
				{Name: "level"},
			},
		},
		{
			name: "valid column name with data type",
			queryParams: url.Values{
				"columns": {"timestamp", "attribute.user:string", "level"},
			},
			expectedColumns: []telemetrytypes.TelemetryFieldKey{
				{Name: "timestamp"},
				{Name: "user", FieldContext: telemetrytypes.FieldContextAttribute, FieldDataType: telemetrytypes.FieldDataTypeString},
				{Name: "level"},
			},
		},
		{
			name: "valid column name with dot notation",
			queryParams: url.Values{
				"columns": {"timestamp", "attribute.user.string", "level"},
			},
			expectedColumns: []telemetrytypes.TelemetryFieldKey{
				{Name: "timestamp"},
				{Name: "user.string", FieldContext: telemetrytypes.FieldContextAttribute},
				{Name: "level"},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
-			err := validateAndApplyDefaultExportLimits(tt.queries)
+			columns := getExportQueryColumns(tt.queryParams)
			assert.Equal(t, len(tt.expectedColumns), len(columns))
			for i, expectedCol := range tt.expectedColumns {
				assert.Equal(t, expectedCol, columns[i])
			}
		})
	}
}
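
These column cases mirror the parseExportQueryColumns helper that this same patch deletes from exporttypes (see the removed file near the end of this diff), so a sketch of getExportQueryColumns under that assumption is straightforward: trim each entry, skip blanks, and let telemetrytypes.GetFieldKeyFromKeyText split out context and data type. The function body itself is assumed, not shown in the patch.

import (
	"net/url"
	"strings"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

// Sketch only: the same trimming/skipping behavior the tests above encode.
func getExportQueryColumns(queryParams url.Values) []telemetrytypes.TelemetryFieldKey {
	columnParams := queryParams["columns"]
	columns := make([]telemetrytypes.TelemetryFieldKey, 0, len(columnParams))
	for _, columnStr := range columnParams {
		columnStr = strings.TrimSpace(columnStr)
		if columnStr == "" {
			continue // empty and whitespace-only entries are skipped
		}
		// "attribute.user:string" becomes {Name: "user", FieldContext: attribute, FieldDataType: string}
		columns = append(columns, telemetrytypes.GetFieldKeyFromKeyText(columnStr))
	}
	return columns
}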

func TestGetExportQueryOrderBy(t *testing.T) {
	tests := []struct {
		name          string
		queryParams   url.Values
		expectedOrder []qbtypes.OrderBy
		expectedError bool
	}{
		{
			name:        "no order specified",
			queryParams: url.Values{},
			expectedOrder: []qbtypes.OrderBy{
				{
					Direction: qbtypes.OrderDirectionDesc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: telemetrylogs.LogsV2TimestampColumn,
						},
					},
				},
				{
					Direction: qbtypes.OrderDirectionDesc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: telemetrylogs.LogsV2IDColumn,
						},
					},
				},
			},
			expectedError: false,
		},
		{
			name: "single order error, direction not specified",
			queryParams: url.Values{
				"order_by": {"timestamp"},
			},
			expectedOrder: nil,
			expectedError: true,
		},
		{
			name: "single order no error",
			queryParams: url.Values{
				"order_by": {"timestamp:asc"},
			},
			expectedOrder: []qbtypes.OrderBy{
				{
					Direction: qbtypes.OrderDirectionAsc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: telemetrylogs.LogsV2TimestampColumn,
						},
					},
				},
				{
					Direction: qbtypes.OrderDirectionAsc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: telemetrylogs.LogsV2IDColumn,
						},
					},
				},
			},
			expectedError: false,
		},
		{
			name: "multiple orders",
			queryParams: url.Values{
				"order_by": {"timestamp:asc", "body:desc", "id:asc"},
			},
			expectedOrder: []qbtypes.OrderBy{
				{
					Direction: qbtypes.OrderDirectionAsc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: telemetrylogs.LogsV2TimestampColumn,
						},
					},
				},
				{
					Direction: qbtypes.OrderDirectionAsc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: telemetrylogs.LogsV2IDColumn,
						},
					},
				},
			},
			expectedError: false,
		},
		{
			name: "empty order name (should be skipped)",
			queryParams: url.Values{
				"order_by": {"timestamp:asc", "", "id:asc"},
			},
			expectedOrder: []qbtypes.OrderBy{
				{
					Direction: qbtypes.OrderDirectionAsc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: telemetrylogs.LogsV2TimestampColumn,
						},
					},
				},
				{
					Direction: qbtypes.OrderDirectionAsc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: telemetrylogs.LogsV2IDColumn,
						},
					},
				},
			},
			expectedError: false,
		},
		{
			name: "whitespace order name (should be skipped)",
			queryParams: url.Values{
				"order_by": {"timestamp:asc", " ", "id:asc"},
			},
			expectedOrder: []qbtypes.OrderBy{
				{
					Direction: qbtypes.OrderDirectionAsc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: telemetrylogs.LogsV2TimestampColumn,
						},
					},
				},
				{
					Direction: qbtypes.OrderDirectionAsc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name: telemetrylogs.LogsV2IDColumn,
						},
					},
				},
			},
			expectedError: false,
		},
		{
			name: "invalid order name (should error out)",
			queryParams: url.Values{
				"order_by": {"attributes.user:", "id:asc"},
			},
			expectedOrder: nil,
			expectedError: true,
		},
		{
			name: "valid order name (should be included)",
			queryParams: url.Values{
				"order_by": {"attribute.user:string:desc", "id:asc"},
			},
			expectedOrder: []qbtypes.OrderBy{
				{
					Direction: qbtypes.OrderDirectionDesc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name:          "user",
							FieldContext:  telemetrytypes.FieldContextAttribute,
							FieldDataType: telemetrytypes.FieldDataTypeString,
						},
					},
				},
			},
			expectedError: false,
		},
		{
			name: "valid order name (should be included)",
			queryParams: url.Values{
				"order_by": {"attribute.user.string:desc", "id:asc"},
			},
			expectedOrder: []qbtypes.OrderBy{
				{
					Direction: qbtypes.OrderDirectionDesc,
					Key: qbtypes.OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name:         "user.string",
							FieldContext: telemetrytypes.FieldContextAttribute,
						},
					},
				},
			},
			expectedError: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			order, err := getExportQueryOrderBy(tt.queryParams)
			if tt.expectedError {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
-				if tt.checkQueries != nil {
-					tt.checkQueries(t, tt.queries)
				assert.Equal(t, len(tt.expectedOrder), len(order))
				for i, expectedOrd := range tt.expectedOrder {
					assert.Equal(t, expectedOrd, order[i])
				}
			}
		})
@@ -169,8 +529,13 @@ func TestConstructCSVHeaderFromQueryResponse(t *testing.T) {

	header := constructCSVHeaderFromQueryResponse(data)

-	// Priority columns come first in order, then the rest alphabetically.
-	assert.Equal(t, []string{"timestamp", "id", "level", "message"}, header)
+	// Since map iteration order is not guaranteed, check that all expected keys are present
+	expectedKeys := []string{"timestamp", "message", "level", "id"}
+	assert.Equal(t, len(expectedKeys), len(header))
+
+	for _, key := range expectedKeys {
+		assert.Contains(t, header, key)
+	}
}
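
constructCSVHeaderFromQueryResponse is not shown in this diff, and the assertion above deliberately tolerates any key ordering. A sketch of the idea follows; the rows parameter and its Data map are assumptions about the RawRow shape, not the actual signature.

// Sketch only: the header is the union of keys across rows; ordering follows
// map iteration, which is why the test checks membership rather than position.
func constructCSVHeaderFromQueryResponse(rows []*qbtypes.RawRow) []string {
	seen := map[string]bool{}
	header := []string{}
	for _, row := range rows {
		for key := range row.Data { // assumed field name
			if !seen[key] {
				seen[key] = true
				header = append(header, key)
			}
		}
	}
	return header
}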

func TestConstructCSVRecordFromQueryResponse(t *testing.T) {

@@ -23,18 +23,8 @@ func NewModule(querier querier.Querier) rawdataexport.Module {

func (m *Module) ExportRawData(ctx context.Context, orgID valuer.UUID, rangeRequest *qbtypes.QueryRangeRequest, doneChan chan any) (chan *qbtypes.RawRow, chan error) {

-	traceOperatorQueryIndex := rangeRequest.TraceOperatorQueryIndex()
-
-	queries := rangeRequest.CompositeQuery.Queries
-
-	// If the trace operator query is present, mark the queries other than trace operator as disabled
-	if traceOperatorQueryIndex > -1 {
-		for idx := range len(queries) {
-			if idx != traceOperatorQueryIndex {
-				queries[idx].SetDisabled(true)
-			}
-		}
-	}
+	spec := rangeRequest.CompositeQuery.Queries[0].Spec.(qbtypes.QueryBuilderQuery[qbtypes.LogAggregation])
+	rowCountLimit := spec.Limit

	rowChan := make(chan *qbtypes.RawRow, 1)
	errChan := make(chan error, 1)
@@ -48,62 +38,52 @@ func (m *Module) ExportRawData(ctx context.Context, orgID valuer.UUID, rangeRequ
		defer close(errChan)
		defer close(rowChan)

-		if traceOperatorQueryIndex > -1 {
-			// If the trace operator query is present, we need to export the data for the trace operator query only
-			exportRawDataForSingleQuery(m.querier, contextWithTimeout, orgID, rangeRequest, rowChan, errChan, doneChan, traceOperatorQueryIndex)
-		} else {
-			// If the trace operator query is not present, we need to export the data for the first query only
-			exportRawDataForSingleQuery(m.querier, contextWithTimeout, orgID, rangeRequest, rowChan, errChan, doneChan, 0)
+		rowCount := 0
+
+		for rowCount < rowCountLimit {
+			spec.Limit = min(ChunkSize, rowCountLimit-rowCount)
+			spec.Offset = rowCount
+
+			rangeRequest.CompositeQuery.Queries[0].Spec = spec
+
+			response, err := m.querier.QueryRange(contextWithTimeout, orgID, rangeRequest)
+			if err != nil {
+				errChan <- err
+				return
+			}
+
+			newRowsCount := 0
+			for _, result := range response.Data.Results {
+				resultData, ok := result.(*qbtypes.RawData)
+				if !ok {
+					errChan <- errors.NewInternalf(errors.CodeInternal, "expected RawData, got %T", result)
+					return
+				}
+
+				newRowsCount += len(resultData.Rows)
+				for _, row := range resultData.Rows {
+					select {
+					case rowChan <- row:
+					case <-doneChan:
+						return
+					case <-ctx.Done():
+						errChan <- ctx.Err()
+						return
+					}
+				}
+			}
+
+			// Break if we did not receive any new rows
+			if newRowsCount == 0 {
+				return
+			}
+
+			rowCount += newRowsCount
+		}
	}()

	return rowChan, errChan
}

-func exportRawDataForSingleQuery(querier querier.Querier, ctx context.Context, orgID valuer.UUID, rangeRequest *qbtypes.QueryRangeRequest, rowChan chan *qbtypes.RawRow, errChan chan error, doneChan chan any, queryIndex int) {
-
-	queries := rangeRequest.CompositeQuery.Queries
-	rowCountLimit := queries[queryIndex].GetLimit()
-	rowCount := 0
-
-	for rowCount < rowCountLimit {
-		chunkSize := min(ChunkSize, rowCountLimit-rowCount)
-		queries[queryIndex].SetLimit(chunkSize)
-		queries[queryIndex].SetOffset(rowCount)
-
-		response, err := querier.QueryRange(ctx, orgID, rangeRequest)
-		if err != nil {
-			errChan <- err
-			return
-		}
-
-		newRowsCount := 0
-		for _, result := range response.Data.Results {
-			resultData, ok := result.(*qbtypes.RawData)
-			if !ok {
-				errChan <- errors.NewInternalf(errors.CodeInternal, "expected RawData, got %T", result)
-				return
-			}
-
-			newRowsCount += len(resultData.Rows)
-			for _, row := range resultData.Rows {
-				select {
-				case rowChan <- row:
-				case <-doneChan:
-					return
-				case <-ctx.Done():
-					errChan <- ctx.Err()
-					return
-				}
-			}
-		}
-
-		rowCount += newRowsCount
-
-		// Stop if we received fewer rows than requested; no more data available
-		if newRowsCount < chunkSize {
-			return
-		}
-	}
-}
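
For orientation, this is how a caller might drain the channel protocol above: read rows until rowChan closes, surface the first error, and close doneChan on early exit so the producer can stop. The drainExport name and the writeRow callback (standing in for the CSV/JSONL encoder) are illustrative, not code from this patch.

// Sketch only: a consumer of ExportRawData's (rowChan, errChan, doneChan) contract.
func drainExport(ctx context.Context, m rawdataexport.Module, orgID valuer.UUID, req *qbtypes.QueryRangeRequest, writeRow func(*qbtypes.RawRow) error) error {
	doneChan := make(chan any)
	defer close(doneChan) // signals the producer if we bail out early

	rowChan, errChan := m.ExportRawData(ctx, orgID, req, doneChan)
	for {
		select {
		case row, ok := <-rowChan:
			if !ok {
				// rows are done; report any error the producer left behind
				if errChan != nil {
					if err, pending := <-errChan; pending && err != nil {
						return err
					}
				}
				return nil
			}
			if err := writeRow(row); err != nil {
				return err
			}
		case err, ok := <-errChan:
			if ok && err != nil {
				return err
			}
			if !ok {
				errChan = nil // closed; stop selecting on it
			}
		}
	}
}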

@@ -12,6 +12,7 @@ import (
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/types/savedviewtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

@@ -24,7 +25,7 @@ func NewModule(sqlstore sqlstore.SQLStore) savedview.Module {
}

func (module *module) GetViewsForFilters(ctx context.Context, orgID string, sourcePage string, name string, category string) ([]*v3.SavedView, error) {
-	var views []types.SavedView
+	var views []savedviewtypes.SavedView
	var err error
	if len(category) == 0 {
		err = module.sqlstore.BunDB().NewSelect().Model(&views).Where("org_id = ? AND source_page = ? AND name LIKE ?", orgID, sourcePage, "%"+name+"%").Scan(ctx)
@@ -76,7 +77,7 @@ func (module *module) CreateView(ctx context.Context, orgID string, view v3.Save
	createBy := claims.Email
	updatedBy := claims.Email

-	dbView := types.SavedView{
+	dbView := savedviewtypes.SavedView{
		TimeAuditable: types.TimeAuditable{
			CreatedAt: createdAt,
			UpdatedAt: updatedAt,
@@ -105,7 +106,7 @@ func (module *module) CreateView(ctx context.Context, orgID string, view v3.Save
}

func (module *module) GetView(ctx context.Context, orgID string, uuid valuer.UUID) (*v3.SavedView, error) {
-	var view types.SavedView
+	var view savedviewtypes.SavedView
	err := module.sqlstore.BunDB().NewSelect().Model(&view).Where("org_id = ? AND id = ?", orgID, uuid.StringValue()).Scan(ctx)
	if err != nil {
		return nil, errors.WrapInternalf(err, errors.CodeInternal, "error in getting saved view")
@@ -146,7 +147,7 @@ func (module *module) UpdateView(ctx context.Context, orgID string, uuid valuer.
	updatedBy := claims.Email

	_, err = module.sqlstore.BunDB().NewUpdate().
-		Model(&types.SavedView{}).
+		Model(&savedviewtypes.SavedView{}).
		Set("updated_at = ?, updated_by = ?, name = ?, category = ?, source_page = ?, tags = ?, data = ?, extra_data = ?",
			updatedAt, updatedBy, view.Name, view.Category, view.SourcePage, strings.Join(view.Tags, ","), data, view.ExtraData).
		Where("id = ?", uuid.StringValue()).
@@ -160,7 +161,7 @@ func (module *module) UpdateView(ctx context.Context, orgID string, uuid valuer.

func (module *module) DeleteView(ctx context.Context, orgID string, uuid valuer.UUID) error {
	_, err := module.sqlstore.BunDB().NewDelete().
-		Model(&types.SavedView{}).
+		Model(&savedviewtypes.SavedView{}).
		Where("id = ?", uuid.StringValue()).
		Where("org_id = ?", orgID).
		Exec(ctx)
@@ -171,7 +172,7 @@ func (module *module) DeleteView(ctx context.Context, orgID string, uuid valuer.
}

func (module *module) Collect(ctx context.Context, orgID valuer.UUID) (map[string]any, error) {
-	savedViews := []*types.SavedView{}
+	savedViews := []*savedviewtypes.SavedView{}

	err := module.
		sqlstore.
@@ -184,5 +185,5 @@ func (module *module) Collect(ctx context.Context, orgID valuer.UUID) (map[strin
		return nil, err
	}

-	return types.NewStatsFromSavedViews(savedViews), nil
+	return savedviewtypes.NewStatsFromSavedViews(savedViews), nil
}

@@ -13,6 +13,7 @@ import (
	root "github.com/SigNoz/signoz/pkg/modules/user"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
+	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/gorilla/mux"
)
@@ -462,7 +463,7 @@ func (h *handler) UpdateAPIKey(w http.ResponseWriter, r *http.Request) {
		return
	}

-	if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email.String())) {
+	if slices.Contains(integrationtypes.AllIntegrationUserEmails, integrationtypes.IntegrationUserEmail(createdByUser.Email.String())) {
		render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
		return
	}
@@ -507,7 +508,7 @@ func (h *handler) RevokeAPIKey(w http.ResponseWriter, r *http.Request) {
		return
	}

-	if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(createdByUser.Email.String())) {
+	if slices.Contains(integrationtypes.AllIntegrationUserEmails, integrationtypes.IntegrationUserEmail(createdByUser.Email.String())) {
		render.Error(w, errors.Newf(errors.TypeInvalidInput, errors.CodeInvalidInput, "API Keys for integration users cannot be revoked"))
		return
	}

@@ -19,6 +19,7 @@ import (
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/emailtypes"
+	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
	"github.com/SigNoz/signoz/pkg/types/roletypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/dustin/go-humanize"
@@ -279,7 +280,7 @@ func (module *Module) DeleteUser(ctx context.Context, orgID valuer.UUID, id stri
		return errors.WithAdditionalf(err, "cannot delete root user")
	}

-	if slices.Contains(types.AllIntegrationUserEmails, types.IntegrationUserEmail(user.Email.String())) {
+	if slices.Contains(integrationtypes.AllIntegrationUserEmails, integrationtypes.IntegrationUserEmail(user.Email.String())) {
		return errors.New(errors.TypeForbidden, errors.CodeForbidden, "integration user cannot be deleted")
	}

@@ -10,15 +10,16 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type cloudProviderAccountsRepository interface {
-	listConnected(ctx context.Context, orgId string, provider string) ([]types.CloudIntegration, *model.ApiError)
+	listConnected(ctx context.Context, orgId string, provider string) ([]integrationtypes.CloudIntegration, *model.ApiError)

-	get(ctx context.Context, orgId string, provider string, id string) (*types.CloudIntegration, *model.ApiError)
+	get(ctx context.Context, orgId string, provider string, id string) (*integrationtypes.CloudIntegration, *model.ApiError)

-	getConnectedCloudAccount(ctx context.Context, orgId string, provider string, accountID string) (*types.CloudIntegration, *model.ApiError)
+	getConnectedCloudAccount(ctx context.Context, orgId string, provider string, accountID string) (*integrationtypes.CloudIntegration, *model.ApiError)

	// Insert an account or update it by (cloudProvider, id)
	// for specified non-empty fields
@@ -27,11 +28,11 @@ type cloudProviderAccountsRepository interface {
		orgId string,
		provider string,
		id *string,
-		config *types.AccountConfig,
+		config *integrationtypes.AccountConfig,
		accountId *string,
-		agentReport *types.AgentReport,
+		agentReport *integrationtypes.AgentReport,
		removedAt *time.Time,
-	) (*types.CloudIntegration, *model.ApiError)
+	) (*integrationtypes.CloudIntegration, *model.ApiError)
}

func newCloudProviderAccountsRepository(store sqlstore.SQLStore) (
@@ -48,8 +49,8 @@ type cloudProviderAccountsSQLRepository struct {

func (r *cloudProviderAccountsSQLRepository) listConnected(
	ctx context.Context, orgId string, cloudProvider string,
-) ([]types.CloudIntegration, *model.ApiError) {
-	accounts := []types.CloudIntegration{}
+) ([]integrationtypes.CloudIntegration, *model.ApiError) {
+	accounts := []integrationtypes.CloudIntegration{}

	err := r.store.BunDB().NewSelect().
		Model(&accounts).
@@ -72,8 +73,8 @@ func (r *cloudProviderAccountsSQLRepository) listConnected(

func (r *cloudProviderAccountsSQLRepository) get(
	ctx context.Context, orgId string, provider string, id string,
-) (*types.CloudIntegration, *model.ApiError) {
-	var result types.CloudIntegration
+) (*integrationtypes.CloudIntegration, *model.ApiError) {
+	var result integrationtypes.CloudIntegration

	err := r.store.BunDB().NewSelect().
		Model(&result).
@@ -97,8 +98,8 @@ func (r *cloudProviderAccountsSQLRepository) get(

func (r *cloudProviderAccountsSQLRepository) getConnectedCloudAccount(
	ctx context.Context, orgId string, provider string, accountId string,
-) (*types.CloudIntegration, *model.ApiError) {
-	var result types.CloudIntegration
+) (*integrationtypes.CloudIntegration, *model.ApiError) {
+	var result integrationtypes.CloudIntegration

	err := r.store.BunDB().NewSelect().
		Model(&result).
@@ -127,11 +128,11 @@ func (r *cloudProviderAccountsSQLRepository) upsert(
	orgId string,
	provider string,
	id *string,
-	config *types.AccountConfig,
+	config *integrationtypes.AccountConfig,
	accountId *string,
-	agentReport *types.AgentReport,
+	agentReport *integrationtypes.AgentReport,
	removedAt *time.Time,
-) (*types.CloudIntegration, *model.ApiError) {
+) (*integrationtypes.CloudIntegration, *model.ApiError) {
	// Insert
	if id == nil {
		temp := valuer.GenerateUUID().StringValue()
@@ -181,7 +182,7 @@ func (r *cloudProviderAccountsSQLRepository) upsert(
		)
	}

-	integration := types.CloudIntegration{
+	integration := integrationtypes.CloudIntegration{
		OrgID:        orgId,
		Provider:     provider,
		Identifiable: types.Identifiable{ID: valuer.MustNewUUID(*id)},

@@ -14,6 +14,7 @@ import (
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
+	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"golang.org/x/exp/maps"
)
@@ -52,7 +53,7 @@ func NewController(sqlStore sqlstore.SQLStore) (*Controller, error) {
}

type ConnectedAccountsListResponse struct {
-	Accounts []types.Account `json:"accounts"`
+	Accounts []integrationtypes.Account `json:"accounts"`
}

func (c *Controller) ListConnectedAccounts(ctx context.Context, orgId string, cloudProvider string) (
@@ -67,7 +68,7 @@ func (c *Controller) ListConnectedAccounts(ctx context.Context, orgId string, cl
		return nil, model.WrapApiError(apiErr, "couldn't list cloud accounts")
	}

-	connectedAccounts := []types.Account{}
+	connectedAccounts := []integrationtypes.Account{}
	for _, a := range accountRecords {
		connectedAccounts = append(connectedAccounts, a.Account())
	}
@@ -81,7 +82,7 @@ type GenerateConnectionUrlRequest struct {
	// Optional. To be specified for updates.
	AccountId *string `json:"account_id,omitempty"`

-	AccountConfig types.AccountConfig `json:"account_config"`
+	AccountConfig integrationtypes.AccountConfig `json:"account_config"`

	AgentConfig SigNozAgentConfig `json:"agent_config"`
}
@@ -149,9 +150,9 @@ func (c *Controller) GenerateConnectionUrl(ctx context.Context, orgId string, cl
}

type AccountStatusResponse struct {
-	Id             string              `json:"id"`
-	CloudAccountId *string             `json:"cloud_account_id,omitempty"`
-	Status         types.AccountStatus `json:"status"`
+	Id             string                         `json:"id"`
+	CloudAccountId *string                        `json:"cloud_account_id,omitempty"`
+	Status         integrationtypes.AccountStatus `json:"status"`
}

func (c *Controller) GetAccountStatus(ctx context.Context, orgId string, cloudProvider string, accountId string) (
@@ -217,7 +218,7 @@ func (c *Controller) CheckInAsAgent(ctx context.Context, orgId string, cloudProv
		))
	}

-	agentReport := types.AgentReport{
+	agentReport := integrationtypes.AgentReport{
		TimestampMillis: time.Now().UnixMilli(),
		Data:            req.Data,
	}
@@ -286,10 +287,10 @@ func (c *Controller) CheckInAsAgent(ctx context.Context, orgId string, cloudProv
}

type UpdateAccountConfigRequest struct {
-	Config types.AccountConfig `json:"config"`
+	Config integrationtypes.AccountConfig `json:"config"`
}

-func (c *Controller) UpdateAccountConfig(ctx context.Context, orgId string, cloudProvider string, accountId string, req UpdateAccountConfigRequest) (*types.Account, *model.ApiError) {
+func (c *Controller) UpdateAccountConfig(ctx context.Context, orgId string, cloudProvider string, accountId string, req UpdateAccountConfigRequest) (*integrationtypes.Account, *model.ApiError) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}
@@ -306,7 +307,7 @@ func (c *Controller) UpdateAccountConfig(ctx context.Context, orgId string, clou
	return &account, nil
}

-func (c *Controller) DisconnectAccount(ctx context.Context, orgId string, cloudProvider string, accountId string) (*types.CloudIntegration, *model.ApiError) {
+func (c *Controller) DisconnectAccount(ctx context.Context, orgId string, cloudProvider string, accountId string) (*integrationtypes.CloudIntegration, *model.ApiError) {
	if apiErr := validateCloudProviderName(cloudProvider); apiErr != nil {
		return nil, apiErr
	}
@@ -346,7 +347,7 @@ func (c *Controller) ListServices(
		return nil, model.WrapApiError(apiErr, "couldn't list cloud services")
	}

-	svcConfigs := map[string]*types.CloudServiceConfig{}
+	svcConfigs := map[string]*integrationtypes.CloudServiceConfig{}
	if cloudAccountId != nil {
		activeAccount, apiErr := c.accountsRepo.getConnectedCloudAccount(
			ctx, orgID, cloudProvider, *cloudAccountId,
@@ -441,8 +442,8 @@ func (c *Controller) GetServiceDetails(
}

type UpdateServiceConfigRequest struct {
-	CloudAccountId string                   `json:"cloud_account_id"`
-	Config         types.CloudServiceConfig `json:"config"`
+	CloudAccountId string                              `json:"cloud_account_id"`
+	Config         integrationtypes.CloudServiceConfig `json:"config"`
}

func (u *UpdateServiceConfigRequest) Validate(def *services.Definition) error {
@@ -460,8 +461,8 @@ func (u *UpdateServiceConfigRequest) Validate(def *services.Definition) error {
}

type UpdateServiceConfigResponse struct {
-	Id     string                   `json:"id"`
-	Config types.CloudServiceConfig `json:"config"`
+	Id     string                              `json:"id"`
+	Config integrationtypes.CloudServiceConfig `json:"config"`
}

func (c *Controller) UpdateServiceConfig(

@@ -3,20 +3,20 @@ package cloudintegrations
import (
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/query-service/app/cloudintegrations/services"
-	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
)

type ServiceSummary struct {
	services.Metadata

-	Config *types.CloudServiceConfig `json:"config"`
+	Config *integrationtypes.CloudServiceConfig `json:"config"`
}

type ServiceDetails struct {
	services.Definition

-	Config           *types.CloudServiceConfig `json:"config"`
-	ConnectionStatus *ServiceConnectionStatus  `json:"status,omitempty"`
+	Config           *integrationtypes.CloudServiceConfig `json:"config"`
+	ConnectionStatus *ServiceConnectionStatus             `json:"status,omitempty"`
}

type AccountStatus struct {
@@ -61,7 +61,7 @@ func NewCompiledCollectionStrategy(provider string) (*CompiledCollectionStrategy

// Helper for accumulating strategies for enabled services.
func AddServiceStrategy(serviceType string, cs *CompiledCollectionStrategy,
-	definitionStrat *services.CollectionStrategy, config *types.CloudServiceConfig) error {
+	definitionStrat *services.CollectionStrategy, config *integrationtypes.CloudServiceConfig) error {
	if definitionStrat.Provider != cs.Provider {
		return errors.NewInternalf(CodeMismatchCloudProvider, "can't add %s service strategy to compiled strategy for %s",
			definitionStrat.Provider, cs.Provider)

@@ -9,6 +9,7 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

@@ -18,7 +19,7 @@ type ServiceConfigDatabase interface {
		orgID string,
		cloudAccountId string,
		serviceType string,
-	) (*types.CloudServiceConfig, *model.ApiError)
+	) (*integrationtypes.CloudServiceConfig, *model.ApiError)

	upsert(
		ctx context.Context,
@@ -26,15 +27,15 @@ type ServiceConfigDatabase interface {
		cloudProvider string,
		cloudAccountId string,
		serviceId string,
-		config types.CloudServiceConfig,
-	) (*types.CloudServiceConfig, *model.ApiError)
+		config integrationtypes.CloudServiceConfig,
+	) (*integrationtypes.CloudServiceConfig, *model.ApiError)

	getAllForAccount(
		ctx context.Context,
		orgID string,
		cloudAccountId string,
	) (
-		configsBySvcId map[string]*types.CloudServiceConfig,
+		configsBySvcId map[string]*integrationtypes.CloudServiceConfig,
		apiErr *model.ApiError,
	)
}
@@ -56,9 +57,9 @@ func (r *serviceConfigSQLRepository) get(
	orgID string,
	cloudAccountId string,
	serviceType string,
-) (*types.CloudServiceConfig, *model.ApiError) {
+) (*integrationtypes.CloudServiceConfig, *model.ApiError) {

-	var result types.CloudIntegrationService
+	var result integrationtypes.CloudIntegrationService

	err := r.store.BunDB().NewSelect().
		Model(&result).
@@ -89,14 +90,14 @@ func (r *serviceConfigSQLRepository) upsert(
	cloudProvider string,
	cloudAccountId string,
	serviceId string,
-	config types.CloudServiceConfig,
-) (*types.CloudServiceConfig, *model.ApiError) {
+	config integrationtypes.CloudServiceConfig,
+) (*integrationtypes.CloudServiceConfig, *model.ApiError) {

	// get cloud integration id from account id
	// if the account is not connected, we don't need to upsert the config
	var cloudIntegrationId string
	err := r.store.BunDB().NewSelect().
-		Model((*types.CloudIntegration)(nil)).
+		Model((*integrationtypes.CloudIntegration)(nil)).
		Column("id").
		Where("provider = ?", cloudProvider).
		Where("account_id = ?", cloudAccountId).
@@ -111,7 +112,7 @@ func (r *serviceConfigSQLRepository) upsert(
		))
	}

-	serviceConfig := types.CloudIntegrationService{
+	serviceConfig := integrationtypes.CloudIntegrationService{
		Identifiable: types.Identifiable{ID: valuer.GenerateUUID()},
		TimeAuditable: types.TimeAuditable{
			CreatedAt: time.Now(),
@@ -139,8 +140,8 @@ func (r *serviceConfigSQLRepository) getAllForAccount(
	ctx context.Context,
	orgID string,
	cloudAccountId string,
-) (map[string]*types.CloudServiceConfig, *model.ApiError) {
-	serviceConfigs := []types.CloudIntegrationService{}
+) (map[string]*integrationtypes.CloudServiceConfig, *model.ApiError) {
+	serviceConfigs := []integrationtypes.CloudIntegrationService{}

	err := r.store.BunDB().NewSelect().
		Model(&serviceConfigs).
@@ -154,7 +155,7 @@ func (r *serviceConfigSQLRepository) getAllForAccount(
		))
	}

-	result := map[string]*types.CloudServiceConfig{}
+	result := map[string]*integrationtypes.CloudServiceConfig{}

	for _, r := range serviceConfigs {
		result[r.Type] = &r.Config

@@ -572,6 +572,9 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
		aH.LicensingAPI.Activate(rw, req)
	})).Methods(http.MethodGet)

+	// Export
+	router.HandleFunc("/api/v1/export_raw_data", am.ViewAccess(aH.Signoz.Handlers.RawDataExport.ExportRawData)).Methods(http.MethodGet)
+
	router.HandleFunc("/api/v1/span_percentile", am.ViewAccess(aH.Signoz.Handlers.SpanPercentile.GetSpanPercentileDetails)).Methods(http.MethodPost)

	// Query Filter Analyzer api used to extract metric names and grouping columns from a query

@@ -11,6 +11,7 @@ import (
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/dashboardtypes"
+	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
	"github.com/SigNoz/signoz/pkg/types/pipelinetypes"
	ruletypes "github.com/SigNoz/signoz/pkg/types/ruletypes"
	"github.com/SigNoz/signoz/pkg/valuer"
@@ -107,7 +108,7 @@ type IntegrationsListItem struct {

type Integration struct {
	IntegrationDetails
-	Installation *types.InstalledIntegration `json:"installation"`
+	Installation *integrationtypes.InstalledIntegration `json:"installation"`
}

type Manager struct {
@@ -223,7 +224,7 @@ func (m *Manager) InstallIntegration(
	ctx context.Context,
	orgId string,
	integrationId string,
-	config types.InstalledIntegrationConfig,
+	config integrationtypes.InstalledIntegrationConfig,
) (*IntegrationsListItem, *model.ApiError) {
	integrationDetails, apiErr := m.getIntegrationDetails(ctx, integrationId)
	if apiErr != nil {
@@ -429,7 +430,7 @@ func (m *Manager) getInstalledIntegration(
	ctx context.Context,
	orgId string,
	integrationId string,
-) (*types.InstalledIntegration, *model.ApiError) {
+) (*integrationtypes.InstalledIntegration, *model.ApiError) {
	iis, apiErr := m.installedIntegrationsRepo.get(
		ctx, orgId, []string{integrationId},
	)
@@ -457,7 +458,7 @@ func (m *Manager) getInstalledIntegrations(
		return nil, apiErr
	}

-	installedTypes := utils.MapSlice(installations, func(i types.InstalledIntegration) string {
+	installedTypes := utils.MapSlice(installations, func(i integrationtypes.InstalledIntegration) string {
		return i.Type
	})
	integrationDetails, apiErr := m.availableIntegrationsRepo.get(ctx, installedTypes)

@@ -4,22 +4,22 @@ import (
	"context"

	"github.com/SigNoz/signoz/pkg/query-service/model"
-	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
)

type InstalledIntegrationsRepo interface {
-	list(ctx context.Context, orgId string) ([]types.InstalledIntegration, *model.ApiError)
+	list(ctx context.Context, orgId string) ([]integrationtypes.InstalledIntegration, *model.ApiError)

	get(
		ctx context.Context, orgId string, integrationTypes []string,
-	) (map[string]types.InstalledIntegration, *model.ApiError)
+	) (map[string]integrationtypes.InstalledIntegration, *model.ApiError)

	upsert(
		ctx context.Context,
		orgId string,
		integrationType string,
-		config types.InstalledIntegrationConfig,
-	) (*types.InstalledIntegration, *model.ApiError)
+		config integrationtypes.InstalledIntegrationConfig,
+	) (*integrationtypes.InstalledIntegration, *model.ApiError)

	delete(ctx context.Context, orgId string, integrationType string) *model.ApiError
}

@@ -7,6 +7,7 @@ import (
	"github.com/SigNoz/signoz/pkg/query-service/model"
	"github.com/SigNoz/signoz/pkg/sqlstore"
	"github.com/SigNoz/signoz/pkg/types"
+	"github.com/SigNoz/signoz/pkg/types/integrationtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/uptrace/bun"
)
@@ -26,8 +27,8 @@ func NewInstalledIntegrationsSqliteRepo(store sqlstore.SQLStore) (
func (r *InstalledIntegrationsSqliteRepo) list(
	ctx context.Context,
	orgId string,
-) ([]types.InstalledIntegration, *model.ApiError) {
-	integrations := []types.InstalledIntegration{}
+) ([]integrationtypes.InstalledIntegration, *model.ApiError) {
+	integrations := []integrationtypes.InstalledIntegration{}

	err := r.store.BunDB().NewSelect().
		Model(&integrations).
@@ -44,8 +45,8 @@ func (r *InstalledIntegrationsSqliteRepo) list(

func (r *InstalledIntegrationsSqliteRepo) get(
	ctx context.Context, orgId string, integrationTypes []string,
-) (map[string]types.InstalledIntegration, *model.ApiError) {
-	integrations := []types.InstalledIntegration{}
+) (map[string]integrationtypes.InstalledIntegration, *model.ApiError) {
+	integrations := []integrationtypes.InstalledIntegration{}

	typeValues := []interface{}{}
	for _, integrationType := range integrationTypes {
@@ -62,7 +63,7 @@ func (r *InstalledIntegrationsSqliteRepo) get(
		))
	}

-	result := map[string]types.InstalledIntegration{}
+	result := map[string]integrationtypes.InstalledIntegration{}
	for _, ii := range integrations {
		result[ii.Type] = ii
	}
@@ -74,10 +75,10 @@ func (r *InstalledIntegrationsSqliteRepo) upsert(
	ctx context.Context,
	orgId string,
	integrationType string,
-	config types.InstalledIntegrationConfig,
-) (*types.InstalledIntegration, *model.ApiError) {
+	config integrationtypes.InstalledIntegrationConfig,
+) (*integrationtypes.InstalledIntegration, *model.ApiError) {

-	integration := types.InstalledIntegration{
+	integration := integrationtypes.InstalledIntegration{
		Identifiable: types.Identifiable{
			ID: valuer.GenerateUUID(),
		},
@@ -114,7 +115,7 @@ func (r *InstalledIntegrationsSqliteRepo) delete(
	ctx context.Context, orgId string, integrationType string,
) *model.ApiError {
	_, dbErr := r.store.BunDB().NewDelete().
-		Model(&types.InstalledIntegration{}).
+		Model(&integrationtypes.InstalledIntegration{}).
		Where("type = ?", integrationType).
		Where("org_id = ?", orgId).
		Exec(ctx)

@@ -22,7 +22,6 @@ import (
	"github.com/SigNoz/signoz/pkg/modules/organization"
	"github.com/SigNoz/signoz/pkg/modules/preference"
	"github.com/SigNoz/signoz/pkg/modules/promote"
-	"github.com/SigNoz/signoz/pkg/modules/rawdataexport"
	"github.com/SigNoz/signoz/pkg/modules/session"
	"github.com/SigNoz/signoz/pkg/modules/user"
	"github.com/SigNoz/signoz/pkg/querier"
@@ -58,7 +57,6 @@ func NewOpenAPI(ctx context.Context, instrumentation instrumentation.Instrumenta
		struct{ gateway.Handler }{},
		struct{ fields.Handler }{},
		struct{ authz.Handler }{},
-		struct{ rawdataexport.Handler }{},
		struct{ zeus.Handler }{},
		struct{ querier.Handler }{},
	).New(ctx, instrumentation.ToProviderSettings(), apiserver.Config{})

@@ -253,7 +253,6 @@ func NewAPIServerProviderFactories(orgGetter organization.Getter, authz authz.Au
		handlers.GatewayHandler,
		handlers.Fields,
		handlers.AuthzHandler,
-		handlers.RawDataExport,
		handlers.ZeusHandler,
		handlers.QuerierHandler,
	),

@@ -1631,8 +1631,10 @@ func (t *telemetryMetaStore) FetchTemporalityAndTypeMulti(ctx context.Context, q
	if err != nil {
		return nil, nil, err
	}
-	// TODO: return error after table migration are run
-	meterMetricsTemporality, meterMetricsTypes, _ := t.fetchMeterSourceMetricsTemporalityAndType(ctx, metricNames...)
+	meterMetricsTemporality, meterMetricsTypes, err := t.fetchMeterSourceMetricsTemporalityAndType(ctx, metricNames...)
+	if err != nil {
+		return nil, nil, err
+	}

	// For metrics not found in the database, set to Unknown
	for _, metricName := range metricNames {
@@ -1728,6 +1730,7 @@ func (t *telemetryMetaStore) fetchMeterSourceMetricsTemporalityAndType(ctx conte
		"metric_name",
		"argMax(temporality, unix_milli) as temporality",
		"any(type) AS type",
+		"any(is_monotonic) as is_monotonic",
	).From(t.meterDBName + "." + t.meterFieldsTblName)

	// Filter by metric names (in the temporality column due to data mix-up)

@@ -1,120 +0,0 @@
-package exporttypes
-
-import (
-	"strings"
-
-	"github.com/SigNoz/signoz/pkg/errors"
-	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
-	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
-	"github.com/SigNoz/signoz/pkg/valuer"
-)
-
-// ExportRawDataQueryParams represents the query parameters for the export raw data endpoint
-type ExportRawDataQueryParams struct {
-	ExportRawDataFormatQueryParam
-
-	// Signal specifies the type of data to export: "logs" or "traces"
-	Signal telemetrytypes.Signal `query:"signal" enum:"logs,traces" required:"true"`
-
-	// Source specifies the type of data to export: "logs" or "traces"
-	// Deprecated: Use Signal instead.
-	Source string `query:"source" deprecated:"true"`
-
-	// Start is the start time for the query (Unix timestamp in nanoseconds)
-	Start uint64 `query:"start"`
-
-	// End is the end time for the query (Unix timestamp in nanoseconds)
-	End uint64 `query:"end"`
-
-	// Limit specifies the maximum number of rows to export
-	Limit int `query:"limit,default=10000" default:"10000" minimum:"1" maximum:"50000"`
-
-	// Filter is a filter expression to apply to the query
-	FilterString string         `query:"filter" deprecated:"true"`
-	Filter       qbtypes.Filter `query:"filterExpression"`
-
-	// Columns specifies the columns to include in the export
-	// Format: ["context.field:type", "context.field", "field"]
-	Columns []string `query:"columns" deprecated:"true"`
-
-	// SelectFields specifies the columns to include in the export
-	SelectFields []telemetrytypes.TelemetryFieldKey `query:"selectFields"`
-
-	// OrderBy specifies the sorting order
-	// Format: "column:direction" or "context.field:type:direction"
-	// Direction can be "asc" or "desc"
-	// ** Deprecated **
-	OrderBy string `query:"order_by" deprecated:"true"`
-
-	// order by keys and directions
-	Order []qbtypes.OrderBy `query:"order"`
-}
-
-type ExportRawDataFormatQueryParam struct {
-	// Format specifies the output format: "csv" or "jsonl"
-	Format string `query:"format,default=csv" default:"csv" enum:"csv,jsonl"`
-}
-
-func (p *ExportRawDataQueryParams) Normalize() {
-	if len(p.Order) == 0 && len(p.OrderBy) > 0 {
-		p.Order = parseExportQueryOrderBy(p.OrderBy)
-	}
-
-	if len(p.SelectFields) == 0 && len(p.Columns) != 0 {
-		p.SelectFields = parseExportQueryColumns(p.Columns)
-
-	}
-
-	if len(p.Filter.Expression) == 0 && len(p.FilterString) > 0 {
-		p.Filter = qbtypes.Filter{Expression: p.FilterString}
-	}
-
-	if p.Signal == telemetrytypes.SignalUnspecified && p.Source != "" {
-		p.Signal = telemetrytypes.Signal{String: valuer.NewString(p.Source)}
-	}
-}
-
-func (p *ExportRawDataQueryParams) Validate() error {
-
-	if p.Signal != telemetrytypes.SignalLogs && p.Signal != telemetrytypes.SignalTraces {
-		return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid signal %s", p.Signal).WithAdditional("Allowed values: [logs, traces]")
-	}
-
-	return nil
-}
-
-// parseExportQueryColumns converts bound column strings to TelemetryFieldKey structs.
-// Each column should be in the format "context.field:type" or "context.field" or "field"
-func parseExportQueryColumns(columnParams []string) []telemetrytypes.TelemetryFieldKey {
-	columns := make([]telemetrytypes.TelemetryFieldKey, 0, len(columnParams))
-	for _, columnStr := range columnParams {
-		columnStr = strings.TrimSpace(columnStr)
-		if columnStr == "" {
-			continue
-		}
-		columns = append(columns, telemetrytypes.GetFieldKeyFromKeyText(columnStr))
-	}
-	return columns
-}
-
-// parseExportQueryOrderBy converts a bound order_by string to an OrderBy slice.
-// The string should be in the format "column:direction" and is assumed already validated.
-func parseExportQueryOrderBy(orderByParam string) []qbtypes.OrderBy {
-	orderByParam = strings.TrimSpace(orderByParam)
-	if orderByParam == "" {
-		return []qbtypes.OrderBy{}
-	}
-
-	parts := strings.Split(orderByParam, ":")
-	column := strings.Join(parts[:len(parts)-1], ":")
-	direction := parts[len(parts)-1]
-
-	return []qbtypes.OrderBy{
-		{
-			Key: qbtypes.OrderByKey{
-				TelemetryFieldKey: telemetrytypes.GetFieldKeyFromKeyText(column),
-			},
-			Direction: qbtypes.OrderDirectionMap[direction],
-		},
-	}
-}
@@ -1,174 +0,0 @@
-package exporttypes
-
-import (
-	"testing"
-
-	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
-	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
-	"github.com/stretchr/testify/assert"
-)
-
-func TestParseExportQueryColumns(t *testing.T) {
-	tests := []struct {
-		name            string
-		input           []string
-		expectedColumns []telemetrytypes.TelemetryFieldKey
-	}{
-		{
-			name:            "empty input",
-			input:           []string{},
-			expectedColumns: []telemetrytypes.TelemetryFieldKey{},
-		},
-		{
-			name:  "single column",
-			input: []string{"timestamp"},
-			expectedColumns: []telemetrytypes.TelemetryFieldKey{
-				{Name: "timestamp"},
-			},
-		},
-		{
-			name:  "multiple columns",
-			input: []string{"timestamp", "message", "level"},
-			expectedColumns: []telemetrytypes.TelemetryFieldKey{
-				{Name: "timestamp"},
-				{Name: "message"},
-				{Name: "level"},
-			},
-		},
-		{
-			name:  "empty entry is skipped",
-			input: []string{"timestamp", "", "level"},
-			expectedColumns: []telemetrytypes.TelemetryFieldKey{
-				{Name: "timestamp"},
-				{Name: "level"},
-			},
-		},
-		{
-			name:  "whitespace-only entry is skipped",
-			input: []string{"timestamp", " ", "level"},
-			expectedColumns: []telemetrytypes.TelemetryFieldKey{
-				{Name: "timestamp"},
-				{Name: "level"},
-			},
-		},
-		{
-			name:  "column with context and type",
-			input: []string{"attribute.user:string"},
-			expectedColumns: []telemetrytypes.TelemetryFieldKey{
-				{Name: "user", FieldContext: telemetrytypes.FieldContextAttribute, FieldDataType: telemetrytypes.FieldDataTypeString},
-			},
-		},
-		{
-			name:  "column with context, dot-notation name",
-			input: []string{"attribute.user.string"},
-			expectedColumns: []telemetrytypes.TelemetryFieldKey{
-				{Name: "user.string", FieldContext: telemetrytypes.FieldContextAttribute},
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			columns := parseExportQueryColumns(tt.input)
-			assert.Equal(t, len(tt.expectedColumns), len(columns))
-			for i, expected := range tt.expectedColumns {
-				assert.Equal(t, expected, columns[i])
-			}
-		})
-	}
-}
-
-func TestParseExportQueryOrderBy(t *testing.T) {
-	tests := []struct {
-		name          string
-		input         string
-		expectedOrder []qbtypes.OrderBy
-	}{
-		{
-			name:          "empty string returns empty slice",
-			input:         "",
-			expectedOrder: []qbtypes.OrderBy{},
-		},
-		{
-			name:  "simple column asc",
-			input: "timestamp:asc",
-			expectedOrder: []qbtypes.OrderBy{
-				{
-					Direction: qbtypes.OrderDirectionAsc,
-					Key: qbtypes.OrderByKey{
-						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "timestamp"},
-					},
-				},
-			},
-		},
-		{
-			name:  "simple column desc",
-			input: "timestamp:desc",
-			expectedOrder: []qbtypes.OrderBy{
-				{
-					Direction: qbtypes.OrderDirectionDesc,
-					Key: qbtypes.OrderByKey{
-						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "timestamp"},
-					},
-				},
-			},
-		},
-		{
-			name:  "column with context and type qualifier",
-			input: "attribute.user:string:desc",
-			expectedOrder: []qbtypes.OrderBy{
-				{
-					Direction: qbtypes.OrderDirectionDesc,
-					Key: qbtypes.OrderByKey{
-						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
-							Name:          "user",
-							FieldContext:  telemetrytypes.FieldContextAttribute,
-							FieldDataType: telemetrytypes.FieldDataTypeString,
-						},
-					},
-				},
-			},
-		},
-		{
-			name:  "column with context, dot-notation name",
-			input: "attribute.user.string:desc",
-			expectedOrder: []qbtypes.OrderBy{
-				{
-					Direction: qbtypes.OrderDirectionDesc,
-					Key: qbtypes.OrderByKey{
-						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
-							Name:         "user.string",
-							FieldContext: telemetrytypes.FieldContextAttribute,
-						},
-					},
-				},
-			},
-		},
-		{
-			name:  "resource with context and type",
-			input: "resource.service.name:string:asc",
-			expectedOrder: []qbtypes.OrderBy{
-				{
-					Direction: qbtypes.OrderDirectionAsc,
-					Key: qbtypes.OrderByKey{
-						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
-							Name:          "service.name",
-							FieldContext:  telemetrytypes.FieldContextResource,
-							FieldDataType: telemetrytypes.FieldDataTypeString,
-						},
-					},
-				},
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			order := parseExportQueryOrderBy(tt.input)
-			assert.Equal(t, len(tt.expectedOrder), len(order))
-			for i, expected := range tt.expectedOrder {
-				assert.Equal(t, expected, order[i])
-			}
-		})
-	}
-}
@@ -1,4 +1,4 @@
-package types
+package integrationtypes

 import (
     "database/sql/driver"
@@ -6,6 +6,7 @@ import (
     "time"

     "github.com/SigNoz/signoz/pkg/errors"
+    "github.com/SigNoz/signoz/pkg/types"
     "github.com/uptrace/bun"
 )

@@ -26,17 +27,17 @@ var AllIntegrationUserEmails = []IntegrationUserEmail{
 type InstalledIntegration struct {
     bun.BaseModel `bun:"table:installed_integration"`

-    Identifiable
+    types.Identifiable
     Type        string                     `json:"type" bun:"type,type:text,unique:org_id_type"`
     Config      InstalledIntegrationConfig `json:"config" bun:"config,type:text"`
     InstalledAt time.Time                  `json:"installed_at" bun:"installed_at,default:current_timestamp"`
     OrgID       string                     `json:"org_id" bun:"org_id,type:text,unique:org_id_type,references:organizations(id),on_delete:cascade"`
 }

-type InstalledIntegrationConfig map[string]interface{}
+type InstalledIntegrationConfig map[string]any

 // For serializing from db
-func (c *InstalledIntegrationConfig) Scan(src interface{}) error {
+func (c *InstalledIntegrationConfig) Scan(src any) error {
     var data []byte
     switch v := src.(type) {
     case []byte:
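
The Scan method above, together with the Value method referenced in the next hunk header, is the standard database/sql pattern for persisting a JSON map in a text column: Value marshals the map on write, Scan accepts either []byte or string on read and unmarshals. A self-contained sketch of that pattern, with illustrative names rather than the exact repository code:

package main

import (
    "database/sql/driver"
    "encoding/json"
    "fmt"
)

type Config map[string]any

// Value marshals the map to JSON so it can be stored in a text column.
func (c Config) Value() (driver.Value, error) {
    return json.Marshal(c)
}

// Scan unmarshals a text or blob column back into the map.
func (c *Config) Scan(src any) error {
    var data []byte
    switch v := src.(type) {
    case []byte:
        data = v
    case string:
        data = []byte(v)
    default:
        return fmt.Errorf("unsupported scan type %T", src)
    }
    return json.Unmarshal(data, c)
}
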
@@ -67,8 +68,8 @@ func (c *InstalledIntegrationConfig) Value() (driver.Value, error) {
 type CloudIntegration struct {
     bun.BaseModel `bun:"table:cloud_integration"`

-    Identifiable
-    TimeAuditable
+    types.Identifiable
+    types.TimeAuditable
     Provider  string         `json:"provider" bun:"provider,type:text,unique:provider_id"`
     Config    *AccountConfig `json:"config" bun:"config,type:text"`
     AccountID *string        `json:"account_id" bun:"account_id,type:text"`
@@ -194,8 +195,8 @@ func (r *AgentReport) Value() (driver.Value, error) {
 type CloudIntegrationService struct {
     bun.BaseModel `bun:"table:cloud_integration_service,alias:cis"`

-    Identifiable
-    TimeAuditable
+    types.Identifiable
+    types.TimeAuditable
     Type               string             `bun:"type,type:text,notnull,unique:cloud_integration_id_type"`
     Config             CloudServiceConfig `bun:"config,type:text"`
     CloudIntegrationID string             `bun:"cloud_integration_id,type:text,notnull,unique:cloud_integration_id_type,references:cloud_integrations(id),on_delete:cascade"`

@@ -301,6 +301,7 @@ type ListMetricsParams struct {
     End    *int64 `query:"end"`
     Limit  int    `query:"limit"`
     Search string `query:"searchText"`
+    Source string `query:"source"`
 }

 // Validate ensures ListMetricsParams contains acceptable values.

@@ -447,7 +447,7 @@ type MetricAggregation struct {
     // space aggregation to apply to the query
     SpaceAggregation metrictypes.SpaceAggregation `json:"spaceAggregation"`
     // param for space aggregation if needed
-    ComparisonSpaceAggregationParam *metrictypes.ComparisonSpaceAggregationParam `json:"comparisonSpaceAggregationParam"`
+    ComparisonSpaceAggregationParam *metrictypes.ComparisonSpaceAggregationParam `json:"comparisonSpaceAggregationParam,omitempty"`
     // table hints to use for the query
     TableHints *metrictypes.MetricTableHints `json:"-"`
     // value filter to apply to the query
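
The only functional change in the MetricAggregation hunk is the omitempty on the comparison-param tag: a nil pointer now disappears from the serialized payload instead of appearing as "comparisonSpaceAggregationParam": null. A quick standalone illustration of the difference (shortened, hypothetical struct names):

package main

import (
    "encoding/json"
    "fmt"
)

type withoutOmit struct {
    Param *int `json:"param"`
}

type withOmit struct {
    Param *int `json:"param,omitempty"`
}

func main() {
    a, _ := json.Marshal(withoutOmit{}) // prints {"param":null}
    b, _ := json.Marshal(withOmit{})    // prints {}
    fmt.Println(string(a), string(b))
}
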
@@ -393,59 +393,6 @@ func (r *QueryRangeRequest) HasOrderSpecified() bool {
     return false
 }

-// UseDefaultOrderBy applies a default order unless query has an explicit order provided.
-func (r *QueryRangeRequest) UseDefaultOrderBy() {
-    queries := r.CompositeQuery.Queries
-    for idx := range queries {
-        switch queries[idx].Spec.(type) {
-        case QueryBuilderQuery[TraceAggregation],
-            QueryBuilderTraceOperator:
-            if len(queries[idx].GetOrder()) == 0 {
-                queries[idx].SetOrder(
-                    []OrderBy{
-                        {
-                            Key: OrderByKey{
-                                TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
-                                    Name:          "timestamp",
-                                    Signal:        telemetrytypes.SignalTraces,
-                                    FieldContext:  telemetrytypes.FieldContextSpan,
-                                    FieldDataType: telemetrytypes.FieldDataTypeNumber,
-                                },
-                            },
-                            Direction: OrderDirectionDesc,
-                        },
-                    },
-                )
-            }
-        case QueryBuilderQuery[LogAggregation]:
-            if len(queries[idx].GetOrder()) == 0 {
-                queries[idx].SetOrder(
-                    []OrderBy{
-                        {
-                            Key: OrderByKey{
-                                TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "timestamp",
-                                    Signal:        telemetrytypes.SignalLogs,
-                                    FieldContext:  telemetrytypes.FieldContextLog,
-                                    FieldDataType: telemetrytypes.FieldDataTypeNumber},
-                            },
-                            Direction: OrderDirectionDesc,
-                        },
-                        {
-                            Key: OrderByKey{
-                                TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "id",
-                                    Signal:        telemetrytypes.SignalLogs,
-                                    FieldContext:  telemetrytypes.FieldContextLog,
-                                    FieldDataType: telemetrytypes.FieldDataTypeString},
-                            },
-                            Direction: OrderDirectionDesc,
-                        },
-                    },
-                )
-            }
-        }
-    }
-}
-
 func (r *QueryRangeRequest) FuncsForQuery(name string) []Function {
     funcs := []Function{}
     for _, query := range r.CompositeQuery.Queries {
@@ -490,16 +437,6 @@ func (r *QueryRangeRequest) IsAnomalyRequest() (*QueryBuilderQuery[MetricAggrega
     return &q, hasAnomaly
 }

-func (r *QueryRangeRequest) TraceOperatorQueryIndex() int {
-    for idx, query := range r.CompositeQuery.Queries {
-        switch query.Spec.(type) {
-        case TraceOperatorType:
-            return idx
-        }
-    }
-    return -1
-}
-
 // We do not support fill gaps for these queries. Maybe support in future?
 func (r *QueryRangeRequest) SkipFillGaps(name string) bool {
     for _, query := range r.CompositeQuery.Queries {
@@ -1,379 +0,0 @@
package querybuildertypesv5

import "github.com/SigNoz/signoz/pkg/types/telemetrytypes"

// GetExpression returns the expression string.
func (q *QueryEnvelope) GetExpression() string {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.Expression
    case QueryBuilderFormula:
        return spec.Expression
    }
    return ""
}

// GetReturnSpansFrom returns the return-spans-from value.
func (q *QueryEnvelope) GetReturnSpansFrom() string {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.ReturnSpansFrom
    }
    return ""
}

// GetSignal returns the signal.
func (q *QueryEnvelope) GetSignal() telemetrytypes.Signal {
    switch spec := q.Spec.(type) {
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Signal
    case QueryBuilderQuery[LogAggregation]:
        return spec.Signal
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Signal
    }
    return telemetrytypes.SignalUnspecified
}

// GetSource returns the source.
func (q *QueryEnvelope) GetSource() telemetrytypes.Source {
    switch spec := q.Spec.(type) {
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Source
    case QueryBuilderQuery[LogAggregation]:
        return spec.Source
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Source
    }
    return telemetrytypes.SourceUnspecified
}

// GetQuery returns the raw query string.
func (q *QueryEnvelope) GetQuery() string {
    switch spec := q.Spec.(type) {
    case PromQuery:
        return spec.Query
    case ClickHouseQuery:
        return spec.Query
    }
    return ""
}

// GetStats returns the PromQL stats flag.
func (q *QueryEnvelope) GetStats() bool {
    switch spec := q.Spec.(type) {
    case PromQuery:
        return spec.Stats
    }
    return false
}

// GetLeft returns the left query reference of a join.
func (q *QueryEnvelope) GetLeft() QueryRef {
    switch spec := q.Spec.(type) {
    case QueryBuilderJoin:
        return spec.Left
    }
    return QueryRef{}
}

// GetRight returns the right query reference of a join.
func (q *QueryEnvelope) GetRight() QueryRef {
    switch spec := q.Spec.(type) {
    case QueryBuilderJoin:
        return spec.Right
    }
    return QueryRef{}
}

// GetJoinType returns the join type.
func (q *QueryEnvelope) GetJoinType() JoinType {
    switch spec := q.Spec.(type) {
    case QueryBuilderJoin:
        return spec.Type
    }
    return JoinType{}
}

// GetOn returns the join ON condition.
func (q *QueryEnvelope) GetOn() string {
    switch spec := q.Spec.(type) {
    case QueryBuilderJoin:
        return spec.On
    }
    return ""
}

// GetQueryName returns the name of the spec.
func (q *QueryEnvelope) GetQueryName() string {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.Name
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Name
    case QueryBuilderQuery[LogAggregation]:
        return spec.Name
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Name
    case QueryBuilderFormula:
        return spec.Name
    case QueryBuilderJoin:
        return spec.Name
    case PromQuery:
        return spec.Name
    case ClickHouseQuery:
        return spec.Name
    }
    return ""
}

// IsDisabled returns whether the spec is disabled.
func (q *QueryEnvelope) IsDisabled() bool {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.Disabled
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Disabled
    case QueryBuilderQuery[LogAggregation]:
        return spec.Disabled
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Disabled
    case QueryBuilderFormula:
        return spec.Disabled
    case QueryBuilderJoin:
        return spec.Disabled
    case PromQuery:
        return spec.Disabled
    case ClickHouseQuery:
        return spec.Disabled
    }
    return false
}

// GetLimit returns the row limit.
func (q *QueryEnvelope) GetLimit() int {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.Limit
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Limit
    case QueryBuilderQuery[LogAggregation]:
        return spec.Limit
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Limit
    case QueryBuilderFormula:
        return spec.Limit
    case QueryBuilderJoin:
        return spec.Limit
    }
    return 0
}

// GetOffset returns the row offset.
func (q *QueryEnvelope) GetOffset() int {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.Offset
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Offset
    case QueryBuilderQuery[LogAggregation]:
        return spec.Offset
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Offset
    }
    return 0
}

// GetType returns the QueryType of the envelope.
func (q *QueryEnvelope) GetType() QueryType {
    return q.Type
}

// GetOrder returns the order-by clauses.
func (q *QueryEnvelope) GetOrder() []OrderBy {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.Order
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Order
    case QueryBuilderQuery[LogAggregation]:
        return spec.Order
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Order
    case QueryBuilderFormula:
        return spec.Order
    case QueryBuilderJoin:
        return spec.Order
    }
    return nil
}

// GetGroupBy returns the group-by keys.
func (q *QueryEnvelope) GetGroupBy() []GroupByKey {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.GroupBy
    case QueryBuilderQuery[TraceAggregation]:
        return spec.GroupBy
    case QueryBuilderQuery[LogAggregation]:
        return spec.GroupBy
    case QueryBuilderQuery[MetricAggregation]:
        return spec.GroupBy
    case QueryBuilderJoin:
        return spec.GroupBy
    }
    return nil
}

// GetFilter returns the filter.
func (q *QueryEnvelope) GetFilter() *Filter {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.Filter
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Filter
    case QueryBuilderQuery[LogAggregation]:
        return spec.Filter
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Filter
    case QueryBuilderJoin:
        return spec.Filter
    }
    return nil
}

// GetHaving returns the having clause.
func (q *QueryEnvelope) GetHaving() *Having {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.Having
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Having
    case QueryBuilderQuery[LogAggregation]:
        return spec.Having
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Having
    case QueryBuilderFormula:
        return spec.Having
    case QueryBuilderJoin:
        return spec.Having
    }
    return nil
}

// GetFunctions returns the post-processing functions.
func (q *QueryEnvelope) GetFunctions() []Function {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.Functions
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Functions
    case QueryBuilderQuery[LogAggregation]:
        return spec.Functions
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Functions
    case QueryBuilderFormula:
        return spec.Functions
    case QueryBuilderJoin:
        return spec.Functions
    }
    return nil
}

// GetSelectFields returns the selected fields.
func (q *QueryEnvelope) GetSelectFields() []telemetrytypes.TelemetryFieldKey {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.SelectFields
    case QueryBuilderQuery[TraceAggregation]:
        return spec.SelectFields
    case QueryBuilderQuery[LogAggregation]:
        return spec.SelectFields
    case QueryBuilderQuery[MetricAggregation]:
        return spec.SelectFields
    case QueryBuilderJoin:
        return spec.SelectFields
    }
    return nil
}

// GetLegend returns the legend label.
func (q *QueryEnvelope) GetLegend() string {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.Legend
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Legend
    case QueryBuilderQuery[LogAggregation]:
        return spec.Legend
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Legend
    case QueryBuilderFormula:
        return spec.Legend
    case PromQuery:
        return spec.Legend
    case ClickHouseQuery:
        return spec.Legend
    }
    return ""
}

// GetCursor returns the pagination cursor.
func (q *QueryEnvelope) GetCursor() string {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.Cursor
    case QueryBuilderQuery[TraceAggregation]:
        return spec.Cursor
    case QueryBuilderQuery[LogAggregation]:
        return spec.Cursor
    case QueryBuilderQuery[MetricAggregation]:
        return spec.Cursor
    }
    return ""
}

// GetStepInterval returns the step interval.
func (q *QueryEnvelope) GetStepInterval() Step {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        return spec.StepInterval
    case QueryBuilderQuery[TraceAggregation]:
        return spec.StepInterval
    case QueryBuilderQuery[LogAggregation]:
        return spec.StepInterval
    case QueryBuilderQuery[MetricAggregation]:
        return spec.StepInterval
    case PromQuery:
        return spec.Step
    }
    return Step{}
}

// GetSecondaryAggregations returns the secondary aggregations.
func (q *QueryEnvelope) GetSecondaryAggregations() []SecondaryAggregation {
    switch spec := q.Spec.(type) {
    case QueryBuilderQuery[TraceAggregation]:
        return spec.SecondaryAggregations
    case QueryBuilderQuery[LogAggregation]:
        return spec.SecondaryAggregations
    case QueryBuilderQuery[MetricAggregation]:
        return spec.SecondaryAggregations
    case QueryBuilderJoin:
        return spec.SecondaryAggregations
    }
    return nil
}

// GetLimitBy returns the limit-by configuration.
func (q *QueryEnvelope) GetLimitBy() *LimitBy {
    switch spec := q.Spec.(type) {
    case QueryBuilderQuery[TraceAggregation]:
        return spec.LimitBy
    case QueryBuilderQuery[LogAggregation]:
        return spec.LimitBy
    case QueryBuilderQuery[MetricAggregation]:
        return spec.LimitBy
    }
    return nil
}
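
Every getter in the file deleted above has the same shape: a type switch over the envelope's Spec that returns the field when the concrete spec carries it, and the zero value otherwise. A condensed sketch of that shape with one representative case (the real file enumerated every spec type):

// The zero value doubles as the "not applicable" answer for
// spec types that do not carry the field.
func (q *QueryEnvelope) getLimit() int {
    switch spec := q.Spec.(type) {
    case QueryBuilderQuery[LogAggregation]:
        return spec.Limit
    }
    return 0
}
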
@@ -1,452 +0,0 @@
package querybuildertypesv5

import "github.com/SigNoz/signoz/pkg/types/telemetrytypes"

// SetExpression sets the expression string of the spec, if applicable.
func (q *QueryEnvelope) SetExpression(expression string) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.Expression = expression
        q.Spec = spec
    case QueryBuilderFormula:
        spec.Expression = expression
        q.Spec = spec
    }
}

// SetReturnSpansFrom sets the return-spans-from value, if applicable.
func (q *QueryEnvelope) SetReturnSpansFrom(returnSpansFrom string) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.ReturnSpansFrom = returnSpansFrom
        q.Spec = spec
    }
}

// SetSignal sets the signal of the spec, if applicable.
func (q *QueryEnvelope) SetSignal(signal telemetrytypes.Signal) {
    switch spec := q.Spec.(type) {
    case QueryBuilderQuery[TraceAggregation]:
        spec.Signal = signal
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Signal = signal
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Signal = signal
        q.Spec = spec
    }
}

// SetSource sets the source of the spec, if applicable.
func (q *QueryEnvelope) SetSource(source telemetrytypes.Source) {
    switch spec := q.Spec.(type) {
    case QueryBuilderQuery[TraceAggregation]:
        spec.Source = source
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Source = source
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Source = source
        q.Spec = spec
    }
}

// SetQuery sets the raw query string of the spec, if applicable.
func (q *QueryEnvelope) SetQuery(query string) {
    switch spec := q.Spec.(type) {
    case PromQuery:
        spec.Query = query
        q.Spec = spec
    case ClickHouseQuery:
        spec.Query = query
        q.Spec = spec
    }
}

// SetStats sets the PromQL stats flag, if applicable.
func (q *QueryEnvelope) SetStats(stats bool) {
    switch spec := q.Spec.(type) {
    case PromQuery:
        spec.Stats = stats
        q.Spec = spec
    }
}

// SetLeft sets the left query reference of a join, if applicable.
func (q *QueryEnvelope) SetLeft(left QueryRef) {
    switch spec := q.Spec.(type) {
    case QueryBuilderJoin:
        spec.Left = left
        q.Spec = spec
    }
}

// SetRight sets the right query reference of a join, if applicable.
func (q *QueryEnvelope) SetRight(right QueryRef) {
    switch spec := q.Spec.(type) {
    case QueryBuilderJoin:
        spec.Right = right
        q.Spec = spec
    }
}

// SetJoinType sets the join type, if applicable.
func (q *QueryEnvelope) SetJoinType(joinType JoinType) {
    switch spec := q.Spec.(type) {
    case QueryBuilderJoin:
        spec.Type = joinType
        q.Spec = spec
    }
}

// SetOn sets the join ON condition, if applicable.
func (q *QueryEnvelope) SetOn(on string) {
    switch spec := q.Spec.(type) {
    case QueryBuilderJoin:
        spec.On = on
        q.Spec = spec
    }
}

// SetQueryName sets the name of the spec, if applicable.
func (q *QueryEnvelope) SetQueryName(name string) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.Name = name
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.Name = name
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Name = name
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Name = name
        q.Spec = spec
    case QueryBuilderFormula:
        spec.Name = name
        q.Spec = spec
    case QueryBuilderJoin:
        spec.Name = name
        q.Spec = spec
    case PromQuery:
        spec.Name = name
        q.Spec = spec
    case ClickHouseQuery:
        spec.Name = name
        q.Spec = spec
    }
}

// SetDisabled sets the disabled flag of the spec, if applicable.
func (q *QueryEnvelope) SetDisabled(disabled bool) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.Disabled = disabled
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.Disabled = disabled
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Disabled = disabled
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Disabled = disabled
        q.Spec = spec
    case QueryBuilderFormula:
        spec.Disabled = disabled
        q.Spec = spec
    case QueryBuilderJoin:
        spec.Disabled = disabled
        q.Spec = spec
    case PromQuery:
        spec.Disabled = disabled
        q.Spec = spec
    case ClickHouseQuery:
        spec.Disabled = disabled
        q.Spec = spec
    }
}

// SetLimit sets the row limit of the spec, if applicable.
func (q *QueryEnvelope) SetLimit(limit int) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.Limit = limit
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.Limit = limit
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Limit = limit
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Limit = limit
        q.Spec = spec
    case QueryBuilderFormula:
        spec.Limit = limit
        q.Spec = spec
    case QueryBuilderJoin:
        spec.Limit = limit
        q.Spec = spec
    }
}

// SetOffset sets the row offset of the spec, if applicable.
func (q *QueryEnvelope) SetOffset(offset int) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.Offset = offset
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.Offset = offset
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Offset = offset
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Offset = offset
        q.Spec = spec
    }
}

// SetType sets the QueryType of the envelope.
func (q *QueryEnvelope) SetType(t QueryType) {
    q.Type = t
}

// SetOrder sets the order-by clauses of the spec, if applicable.
func (q *QueryEnvelope) SetOrder(order []OrderBy) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.Order = order
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.Order = order
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Order = order
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Order = order
        q.Spec = spec
    case QueryBuilderFormula:
        spec.Order = order
        q.Spec = spec
    case QueryBuilderJoin:
        spec.Order = order
        q.Spec = spec
    }
}

// SetGroupBy sets the group-by keys of the spec, if applicable.
func (q *QueryEnvelope) SetGroupBy(groupBy []GroupByKey) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.GroupBy = groupBy
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.GroupBy = groupBy
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.GroupBy = groupBy
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.GroupBy = groupBy
        q.Spec = spec
    case QueryBuilderJoin:
        spec.GroupBy = groupBy
        q.Spec = spec
    }
}

// SetFilter sets the filter of the spec, if applicable.
func (q *QueryEnvelope) SetFilter(filter *Filter) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.Filter = filter
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.Filter = filter
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Filter = filter
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Filter = filter
        q.Spec = spec
    case QueryBuilderJoin:
        spec.Filter = filter
        q.Spec = spec
    }
}

// SetHaving sets the having clause of the spec, if applicable.
func (q *QueryEnvelope) SetHaving(having *Having) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.Having = having
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.Having = having
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Having = having
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Having = having
        q.Spec = spec
    case QueryBuilderFormula:
        spec.Having = having
        q.Spec = spec
    case QueryBuilderJoin:
        spec.Having = having
        q.Spec = spec
    }
}

// SetFunctions sets the post-processing functions of the spec, if applicable.
func (q *QueryEnvelope) SetFunctions(functions []Function) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.Functions = functions
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.Functions = functions
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Functions = functions
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Functions = functions
        q.Spec = spec
    case QueryBuilderFormula:
        spec.Functions = functions
        q.Spec = spec
    case QueryBuilderJoin:
        spec.Functions = functions
        q.Spec = spec
    }
}

// SetSelectFields sets the selected fields of the spec, if applicable.
func (q *QueryEnvelope) SetSelectFields(fields []telemetrytypes.TelemetryFieldKey) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.SelectFields = fields
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.SelectFields = fields
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.SelectFields = fields
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.SelectFields = fields
        q.Spec = spec
    case QueryBuilderJoin:
        spec.SelectFields = fields
        q.Spec = spec
    }
}

// SetLegend sets the legend label of the spec, if applicable.
func (q *QueryEnvelope) SetLegend(legend string) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.Legend = legend
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.Legend = legend
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Legend = legend
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Legend = legend
        q.Spec = spec
    case QueryBuilderFormula:
        spec.Legend = legend
        q.Spec = spec
    case PromQuery:
        spec.Legend = legend
        q.Spec = spec
    case ClickHouseQuery:
        spec.Legend = legend
        q.Spec = spec
    }
}

// SetCursor sets the pagination cursor of the spec, if applicable.
func (q *QueryEnvelope) SetCursor(cursor string) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.Cursor = cursor
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.Cursor = cursor
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.Cursor = cursor
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.Cursor = cursor
        q.Spec = spec
    }
}

// SetStepInterval sets the step interval of the spec, if applicable.
func (q *QueryEnvelope) SetStepInterval(step Step) {
    switch spec := q.Spec.(type) {
    case QueryBuilderTraceOperator:
        spec.StepInterval = step
        q.Spec = spec
    case QueryBuilderQuery[TraceAggregation]:
        spec.StepInterval = step
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.StepInterval = step
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.StepInterval = step
        q.Spec = spec
    case PromQuery:
        spec.Step = step
        q.Spec = spec
    }
}

// SetSecondaryAggregations sets the secondary aggregations of the spec, if applicable.
func (q *QueryEnvelope) SetSecondaryAggregations(secondaryAggregations []SecondaryAggregation) {
    switch spec := q.Spec.(type) {
    case QueryBuilderQuery[TraceAggregation]:
        spec.SecondaryAggregations = secondaryAggregations
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.SecondaryAggregations = secondaryAggregations
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.SecondaryAggregations = secondaryAggregations
        q.Spec = spec
    case QueryBuilderJoin:
        spec.SecondaryAggregations = secondaryAggregations
        q.Spec = spec
    }
}

// SetLimitBy sets the limit-by configuration of the spec, if applicable.
func (q *QueryEnvelope) SetLimitBy(limitBy *LimitBy) {
    switch spec := q.Spec.(type) {
    case QueryBuilderQuery[TraceAggregation]:
        spec.LimitBy = limitBy
        q.Spec = spec
    case QueryBuilderQuery[LogAggregation]:
        spec.LimitBy = limitBy
        q.Spec = spec
    case QueryBuilderQuery[MetricAggregation]:
        spec.LimitBy = limitBy
        q.Spec = spec
    }
}
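
Note the `q.Spec = spec` at the end of every case above: a type switch over an interface value yields a copy of the underlying struct, so each setter must write the modified copy back or the change is silently lost. A minimal standalone demonstration of the pitfall (illustrative types, not the repository's):

package main

import "fmt"

type logSpec struct{ Limit int }

type envelope struct{ Spec any }

func main() {
    e := envelope{Spec: logSpec{Limit: 1}}
    if spec, ok := e.Spec.(logSpec); ok {
        spec.Limit = 99 // mutates the copy only
        e.Spec = spec   // the write-back is what makes the change visible
    }
    fmt.Println(e.Spec) // {99}
}
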
@@ -10,9 +10,55 @@ import (
    "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

+// queryName returns the name from any query envelope spec type.
+func (e QueryEnvelope) queryName() string {
+    switch spec := e.Spec.(type) {
+    case QueryBuilderQuery[TraceAggregation]:
+        return spec.Name
+    case QueryBuilderQuery[LogAggregation]:
+        return spec.Name
+    case QueryBuilderQuery[MetricAggregation]:
+        return spec.Name
+    case QueryBuilderFormula:
+        return spec.Name
+    case QueryBuilderTraceOperator:
+        return spec.Name
+    case QueryBuilderJoin:
+        return spec.Name
+    case PromQuery:
+        return spec.Name
+    case ClickHouseQuery:
+        return spec.Name
+    }
+    return ""
+}
+
+// isDisabled returns the disabled status from any query envelope spec type.
+func (e QueryEnvelope) isDisabled() bool {
+    switch spec := e.Spec.(type) {
+    case QueryBuilderQuery[TraceAggregation]:
+        return spec.Disabled
+    case QueryBuilderQuery[LogAggregation]:
+        return spec.Disabled
+    case QueryBuilderQuery[MetricAggregation]:
+        return spec.Disabled
+    case QueryBuilderFormula:
+        return spec.Disabled
+    case QueryBuilderTraceOperator:
+        return spec.Disabled
+    case QueryBuilderJoin:
+        return spec.Disabled
+    case PromQuery:
+        return spec.Disabled
+    case ClickHouseQuery:
+        return spec.Disabled
+    }
+    return false
+}
+
 // getQueryIdentifier returns a friendly identifier for a query based on its type and name/content
 func getQueryIdentifier(envelope QueryEnvelope, index int) string {
-    name := envelope.GetQueryName()
+    name := envelope.queryName()

     var typeLabel string
     switch envelope.Type {
@@ -43,115 +89,50 @@ const (
     MaxQueryLimit = 10000
 )

-// ValidationOption is a functional option for configuring validation behaviour.
-type ValidationOption func(*validationConfig)
-
-type validationConfig struct {
-    skipLimitValidation       bool
-    skipAggregationValidation bool
-    skipHavingValidation      bool
-    skipAggregationOrderBy    bool
-    skipSelectFieldValidation bool
-    skipGroupByValidation     bool
-}
-
-func applyValidationOptions(opts []ValidationOption) validationConfig {
-    cfg := validationConfig{}
-    for _, opt := range opts {
-        opt(&cfg)
-    }
-    return cfg
-}
-
-// SkipLimitValidation returns a ValidationOption that skips the limit range check.
-// Use this when the caller has already validated limits with different constraints.
-func WithSkipLimitValidation() ValidationOption {
-    return func(cfg *validationConfig) {
-        cfg.skipLimitValidation = true
-    }
-}
-
-// SkipAggregationValidation skips aggregation validation.
-// Used for raw/trace request types where aggregations are not required.
-func WithSkipAggregationValidation() ValidationOption {
-    return func(cfg *validationConfig) {
-        cfg.skipAggregationValidation = true
-    }
-}
-
-// SkipHavingValidation skips having-clause validation.
-// Used for raw/trace request types where having clauses do not apply.
-func WithSkipHavingValidation() ValidationOption {
-    return func(cfg *validationConfig) {
-        cfg.skipHavingValidation = true
-    }
-}
-
-// SkipAggregationOrderBy skips the aggregation-specific order-by key validation.
-// Used for raw/trace request types where order-by keys are not restricted to group-by or aggregation keys.
-func WithSkipAggregationOrderBy() ValidationOption {
-    return func(cfg *validationConfig) {
-        cfg.skipAggregationOrderBy = true
-    }
-}
-
-// SkipSelectFieldValidation skips select-field validation.
-// Used for aggregation request types where select fields do not apply.
-func WithSkipSelectFieldValidation() ValidationOption {
-    return func(cfg *validationConfig) {
-        cfg.skipSelectFieldValidation = true
-    }
-}
-
-// SkipGroupByValidation skips group-by validation.
-// Used for raw/trace request types where group-by does not apply.
-func WithSkipGroupByValidation() ValidationOption {
-    return func(cfg *validationConfig) {
-        cfg.skipGroupByValidation = true
-    }
-}
-
-// Validate performs preliminary validation on QueryBuilderQuery.
-func (q *QueryBuilderQuery[T]) Validate(opts ...ValidationOption) error {
-    cfg := applyValidationOptions(opts)
-
+// Validate performs preliminary validation on QueryBuilderQuery
+func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
     // Validate signal
     if err := q.validateSignal(); err != nil {
         return err
     }

-    if err := q.validateAggregations(cfg); err != nil {
+    if err := q.validateAggregations(requestType); err != nil {
         return err
     }

-    if err := q.validateGroupBy(cfg); err != nil {
+    if err := q.validateGroupBy(requestType); err != nil {
         return err
     }

-    if err := q.validateLimitAndPagination(cfg); err != nil {
+    // Validate limit and pagination
+    if err := q.validateLimitAndPagination(); err != nil {
        return err
    }

     // Validate functions
     if err := q.validateFunctions(); err != nil {
         return err
     }

     // Validate secondary aggregations
     if err := q.validateSecondaryAggregations(); err != nil {
         return err
     }

-    if err := q.validateOrderBy(cfg); err != nil {
+    if err := q.validateOrderBy(requestType); err != nil {
         return err
     }

-    if err := q.validateSelectFields(cfg); err != nil {
+    if err := q.validateSelectFields(requestType); err != nil {
         return err
     }

     return nil
 }

-func (q *QueryBuilderQuery[T]) validateSelectFields(cfg validationConfig) error {
-    if cfg.skipSelectFieldValidation {
+func (q *QueryBuilderQuery[T]) validateSelectFields(requestType RequestType) error {
+    // selectFields don't apply to aggregation queries, skip validation
+    if requestType.IsAggregation() {
         return nil
     }

@@ -167,8 +148,9 @@ func (q *QueryBuilderQuery[T]) validateSelectFields(cfg validationConfig) error
     return nil
 }

-func (q *QueryBuilderQuery[T]) validateGroupBy(cfg validationConfig) error {
-    if cfg.skipGroupByValidation {
+func (q *QueryBuilderQuery[T]) validateGroupBy(requestType RequestType) error {
+    // groupBy doesn't apply to non-aggregation queries, skip validation
+    if !requestType.IsAggregation() {
         return nil
     }
     for idx, item := range q.GroupBy {
@@ -201,8 +183,9 @@ func (q *QueryBuilderQuery[T]) validateSignal() error {
     }
 }

-func (q *QueryBuilderQuery[T]) validateAggregations(cfg validationConfig) error {
-    if cfg.skipAggregationValidation {
+func (q *QueryBuilderQuery[T]) validateAggregations(requestType RequestType) error {
+    // aggregations don't apply to non-aggregation queries, skip validation
+    if !requestType.IsAggregation() {
         return nil
     }

@@ -282,25 +265,24 @@ func (q *QueryBuilderQuery[T]) validateAggregations(cfg validationConfig) error
     return nil
 }

-func (q *QueryBuilderQuery[T]) validateLimitAndPagination(cfg validationConfig) error {
-    if !cfg.skipLimitValidation {
-        if q.Limit < 0 {
-            return errors.NewInvalidInputf(
-                errors.CodeInvalidInput,
-                "limit must be non-negative, got %d",
-                q.Limit,
-            )
-        }
+func (q *QueryBuilderQuery[T]) validateLimitAndPagination() error {
+    // Validate limit
+    if q.Limit < 0 {
+        return errors.NewInvalidInputf(
+            errors.CodeInvalidInput,
+            "limit must be non-negative, got %d",
+            q.Limit,
+        )
+    }

-        if q.Limit > MaxQueryLimit {
-            return errors.NewInvalidInputf(
-                errors.CodeInvalidInput,
-                "limit exceeds maximum allowed value of %d",
-                MaxQueryLimit,
-            ).WithAdditional(
-                fmt.Sprintf("Provided limit: %d", q.Limit),
-            )
-        }
-    }
+    if q.Limit > MaxQueryLimit {
+        return errors.NewInvalidInputf(
+            errors.CodeInvalidInput,
+            "limit exceeds maximum allowed value of %d",
+            MaxQueryLimit,
+        ).WithAdditional(
+            fmt.Sprintf("Provided limit: %d", q.Limit),
+        )
+    }

     // Validate offset
@@ -347,7 +329,7 @@ func (q *QueryBuilderQuery[T]) validateSecondaryAggregations() error {
     return nil
 }

-func (q *QueryBuilderQuery[T]) validateOrderBy(cfg validationConfig) error {
+func (q *QueryBuilderQuery[T]) validateOrderBy(requestType RequestType) error {
     for i, order := range q.Order {
         // Direction validation is handled by the OrderDirection type
         if order.Direction != OrderDirectionAsc && order.Direction != OrderDirectionDesc {
@@ -366,7 +348,8 @@ func (q *QueryBuilderQuery[T]) validateOrderBy(cfg validationConfig) error {
         }
     }

-    if !cfg.skipAggregationOrderBy {
+    // aggregation-specific order key validation only applies to aggregation queries
+    if requestType.IsAggregation() {
         return q.validateOrderByForAggregation()
     }

@@ -448,8 +431,8 @@ func (q *QueryBuilderQuery[T]) validateOrderByForAggregation() error {
     return nil
 }

-// Validate validates the entire query range request.
-func (r *QueryRangeRequest) Validate(opts ...ValidationOption) error {
+// ValidateQueryRangeRequest validates the entire query range request
+func (r *QueryRangeRequest) Validate() error {
     // Validate time range
     if r.RequestType != RequestTypeRawStream && r.Start >= r.End {
         return errors.NewInvalidInputf(
@@ -460,10 +443,8 @@ func (r *QueryRangeRequest) Validate(opts ...ValidationOption) error {

     // Validate request type
     switch r.RequestType {
-    case RequestTypeRaw, RequestTypeRawStream, RequestTypeTrace:
-        opts = append(opts, getValidationOptions(false)...)
-    case RequestTypeTimeSeries, RequestTypeScalar:
-        opts = append(opts, getValidationOptions(true)...)
+    case RequestTypeRaw, RequestTypeRawStream, RequestTypeTimeSeries, RequestTypeScalar, RequestTypeTrace:
+        // Valid request types
     default:
         return errors.NewInvalidInputf(
             errors.CodeInvalidInput,
@@ -475,7 +456,7 @@ func (r *QueryRangeRequest) Validate(opts ...ValidationOption) error {
     }

     // Validate composite query
-    if err := r.CompositeQuery.Validate(opts...); err != nil {
+    if err := r.validateCompositeQuery(); err != nil {
         return err
     }

@@ -490,7 +471,7 @@ func (r *QueryRangeRequest) Validate(opts ...ValidationOption) error {
 // validateAllQueriesNotDisabled validates that at least one query in the composite query is enabled
 func (r *QueryRangeRequest) validateAllQueriesNotDisabled() error {
     for _, envelope := range r.CompositeQuery.Queries {
-        if !envelope.IsDisabled() {
+        if !envelope.isDisabled() {
             return nil
         }
     }
@@ -501,8 +482,12 @@ func (r *QueryRangeRequest) validateAllQueriesNotDisabled() error {
     )
 }

+func (r *QueryRangeRequest) validateCompositeQuery() error {
+    return r.CompositeQuery.Validate(r.RequestType)
+}
+
 // Validate performs validation on CompositeQuery
-func (c *CompositeQuery) Validate(opts ...ValidationOption) error {
+func (c *CompositeQuery) Validate(requestType RequestType) error {
     if len(c.Queries) == 0 {
         return errors.NewInvalidInputf(
             errors.CodeInvalidInput,
@@ -514,14 +499,14 @@ func (c *CompositeQuery) Validate(opts ...ValidationOption) error {
     queryNames := make(map[string]bool)

     for i, envelope := range c.Queries {
-        if err := validateQueryEnvelope(envelope, opts...); err != nil {
+        if err := validateQueryEnvelope(envelope, requestType); err != nil {
             queryId := getQueryIdentifier(envelope, i)
             return wrapValidationError(err, queryId, "invalid %s: %s")
         }

         // Check name uniqueness for builder queries
         if envelope.Type == QueryTypeBuilder || envelope.Type == QueryTypeSubQuery {
-            name := envelope.GetQueryName()
+            name := envelope.queryName()
             if name != "" {
                 if queryNames[name] {
                     return errors.NewInvalidInputf(
@@ -538,16 +523,16 @@ func (c *CompositeQuery) Validate(opts ...ValidationOption) error {
     return nil
 }

-func validateQueryEnvelope(envelope QueryEnvelope, opts ...ValidationOption) error {
+func validateQueryEnvelope(envelope QueryEnvelope, requestType RequestType) error {
     switch envelope.Type {
     case QueryTypeBuilder, QueryTypeSubQuery:
         switch spec := envelope.Spec.(type) {
         case QueryBuilderQuery[TraceAggregation]:
-            return spec.Validate(opts...)
+            return spec.Validate(requestType)
         case QueryBuilderQuery[LogAggregation]:
-            return spec.Validate(opts...)
+            return spec.Validate(requestType)
         case QueryBuilderQuery[MetricAggregation]:
-            return spec.Validate(opts...)
+            return spec.Validate(requestType)
         default:
             return errors.NewInvalidInputf(
                 errors.CodeInvalidInput,
@@ -633,11 +618,3 @@ func validateQueryEnvelope(envelope QueryEnvelope, opts ...ValidationOption) err
             )
         }
     }

-func getValidationOptions(isAggregationQuery bool) []ValidationOption {
-    if isAggregationQuery {
-        return []ValidationOption{WithSkipSelectFieldValidation()}
-    }
-    return []ValidationOption{WithSkipAggregationValidation(), WithSkipHavingValidation(), WithSkipAggregationOrderBy(), WithSkipGroupByValidation()}
-
-}
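
After this change a caller no longer assembles skip-options per request type; it passes the request type straight through, and each validator decides internally whether its check applies via RequestType.IsAggregation(). A hedged sketch of a call site as it would look after the change (field values are illustrative placeholders):

    req := QueryRangeRequest{
        RequestType:    RequestTypeTimeSeries,
        Start:          start, // illustrative time bounds
        End:            end,
        CompositeQuery: CompositeQuery{Queries: queries},
    }
    if err := req.Validate(); err != nil {
        // aggregation, group-by and order-by checks ran because
        // RequestTypeTimeSeries is an aggregation request type
        return err
    }
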
@@ -743,7 +743,7 @@ func TestValidateQueryEnvelope(t *testing.T) {

     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            err := validateQueryEnvelope(tt.envelope)
+            err := validateQueryEnvelope(tt.envelope, tt.requestType)
             if tt.wantErr {
                 if err == nil {
                     t.Errorf("validateQueryEnvelope() expected error but got none")
@@ -816,7 +816,7 @@ func TestQueryEnvelope_Helpers(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            got := tt.envelope.GetQueryName()
+            got := tt.envelope.queryName()
             if got != tt.want {
                 t.Errorf("queryName() = %q, want %q", got, tt.want)
             }
@@ -868,7 +868,7 @@ func TestQueryEnvelope_Helpers(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            got := tt.envelope.IsDisabled()
+            got := tt.envelope.isDisabled()
             if got != tt.want {
                 t.Errorf("isDisabled() = %v, want %v", got, tt.want)
             }
@@ -1107,7 +1107,7 @@ func TestQueryRangeRequest_ValidateOrderByForAggregation(t *testing.T) {

     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            err := tt.query.Validate(getValidationOptions(true)...)
+            err := tt.query.Validate(RequestTypeTimeSeries)
             if tt.wantErr {
                 if err == nil {
                     t.Errorf("validateOrderByForAggregation() expected error but got none")
@@ -1161,7 +1161,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
             {TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "service.name"}},
         },
     }
-    err := query.Validate(getValidationOptions(false)...)
+    err := query.Validate(RequestTypeRaw)
     if err != nil {
         t.Errorf("expected no error for groupBy with raw request type, got: %v", err)
     }
@@ -1178,7 +1178,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
             {TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: ""}},
         },
     }
-    err := query.Validate(getValidationOptions(true)...)
+    err := query.Validate(RequestTypeTimeSeries)
     if err == nil {
         t.Errorf("expected error for empty groupBy key with timeseries request type")
     }
@@ -1190,7 +1190,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
         Signal: telemetrytypes.SignalLogs,
         Having: &Having{Expression: "count() > 10"},
     }
-    err := query.Validate(getValidationOptions(false)...)
+    err := query.Validate(RequestTypeRaw)
     if err != nil {
         t.Errorf("expected no error for having with raw request type, got: %v", err)
     }
@@ -1202,7 +1202,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
         Signal: telemetrytypes.SignalTraces,
         Having: &Having{Expression: "count() > 10"},
     }
-    err := query.Validate(getValidationOptions(false)...)
+    err := query.Validate(RequestTypeTrace)
     if err != nil {
         t.Errorf("expected no error for having with trace request type, got: %v", err)
     }
@@ -1216,7 +1216,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
             {Expression: "count()"},
         },
     }
-    err := query.Validate(getValidationOptions(false)...)
+    err := query.Validate(RequestTypeRaw)
     if err != nil {
         t.Errorf("expected no error for aggregations with raw request type, got: %v", err)
     }
@@ -1230,7 +1230,7 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
             {Expression: "count()"},
         },
     }
-    err := query.Validate(getValidationOptions(false)...)
+    err := query.Validate(RequestTypeRawStream)
     if err != nil {
         t.Errorf("expected no error for aggregations with raw_stream request type, got: %v", err)
     }
@@ -1248,12 +1248,12 @@ func TestNonAggregationFieldsSkipped(t *testing.T) {
         },
     }
     // Should error for raw (selectFields are validated)
-    err := query.Validate(getValidationOptions(false)...)
+    err := query.Validate(RequestTypeRaw)
     if err == nil {
         t.Errorf("expected error for isRoot in selectFields with raw request type")
     }
     // Should pass for timeseries (selectFields skipped)
-    err = query.Validate(getValidationOptions(true)...)
+    err = query.Validate(RequestTypeTimeSeries)
     if err != nil {
         t.Errorf("expected no error for isRoot in selectFields with timeseries request type, got: %v", err)
     }
@@ -1,17 +1,18 @@
-package types
+package savedviewtypes

 import (
     "strings"

+    "github.com/SigNoz/signoz/pkg/types"
     "github.com/uptrace/bun"
 )

 type SavedView struct {
     bun.BaseModel `bun:"table:saved_views"`

-    Identifiable
-    TimeAuditable
-    UserAuditable
+    types.Identifiable
+    types.TimeAuditable
+    types.UserAuditable
     OrgID    string `json:"orgId" bun:"org_id,notnull"`
     Name     string `json:"name" bun:"name,type:text,notnull"`
     Category string `json:"category" bun:"category,type:text,notnull"`
@@ -1,634 +0,0 @@
import csv
import io
import json
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
from urllib.parse import urlencode

import requests

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.logs import Logs


def test_export_logs_csv(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_logs: Callable[[List[Logs]], None],
) -> None:
    """
    Setup:
        Insert 3 logs with different severity levels and attributes.

    Tests:
        1. Export logs as CSV format
        2. Verify CSV structure and content
        3. Validate headers are present
        4. Check log data is correctly formatted
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_logs(
        [
            Logs(
                timestamp=now - timedelta(seconds=10),
                body="Application started successfully",
                severity_text="INFO",
                resources={
                    "service.name": "api-service",
                    "deployment.environment": "production",
                    "host.name": "server-01",
                },
                attributes={
                    "http.method": "GET",
                    "http.status_code": 200,
                    "user.id": "user123",
                },
            ),
            Logs(
                timestamp=now - timedelta(seconds=8),
                body="Connection to database failed",
                severity_text="ERROR",
                resources={
                    "service.name": "api-service",
                    "deployment.environment": "production",
                    "host.name": "server-01",
                },
                attributes={
                    "error.type": "ConnectionError",
                    "db.name": "production_db",
                },
            ),
            Logs(
                timestamp=now - timedelta(seconds=5),
                body="Request processed",
                severity_text="DEBUG",
                resources={
                    "service.name": "worker-service",
                    "deployment.environment": "production",
                    "host.name": "server-02",
                },
                attributes={
                    "request.id": "req-456",
                    "duration_ms": 150.5,
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
    }

    # Export logs as CSV (default format, no source needed)
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=30,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "text/csv"
    assert "attachment" in response.headers.get("Content-Disposition", "")

    # Parse CSV content
    csv_content = response.text
    csv_reader = csv.DictReader(io.StringIO(csv_content))

    rows = list(csv_reader)
    assert len(rows) == 3, f"Expected 3 rows, got {len(rows)}"

    # Verify log bodies are present in the exported data
    bodies = [row.get("body") for row in rows]
    assert "Application started successfully" in bodies
    assert "Connection to database failed" in bodies
    assert "Request processed" in bodies

    # Verify severity levels
    severities = [row.get("severity_text") for row in rows]
    assert "INFO" in severities
    assert "ERROR" in severities
    assert "DEBUG" in severities
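
The deleted tests in this file exercised /api/v1/export_raw_data end to end; the same request is easy to issue from any HTTP client. A hedged Go sketch of the equivalent call, using the same query parameters the tests pass (host, token and the nanosecond time range are placeholders):

package main

import (
    "fmt"
    "net/http"
    "net/url"
)

func main() {
    params := url.Values{}
    params.Set("format", "jsonl")
    params.Set("source", "logs")
    params.Set("start", "1700000000000000000") // placeholder ns timestamps
    params.Set("end", "1700000300000000000")

    req, err := http.NewRequest(http.MethodGet,
        "http://localhost:8080/api/v1/export_raw_data?"+params.Encode(), nil)
    if err != nil {
        panic(err)
    }
    req.Header.Set("Authorization", "Bearer <token>") // placeholder token

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    fmt.Println(resp.Status, resp.Header.Get("Content-Type"))
}
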
def test_export_logs_jsonl(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_logs: Callable[[List[Logs]], None],
) -> None:
    """
    Setup:
        Insert 2 logs with different attributes.

    Tests:
        1. Export logs as JSONL format
        2. Verify JSONL structure and content
        3. Check each line is valid JSON
        4. Validate log data is correctly formatted
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_logs(
        [
            Logs(
                timestamp=now - timedelta(seconds=10),
                body="User logged in",
                severity_text="INFO",
                resources={
                    "service.name": "auth-service",
                    "deployment.environment": "staging",
                },
                attributes={
                    "user.email": "test@example.com",
                    "session.id": "sess-789",
                },
            ),
            Logs(
                timestamp=now - timedelta(seconds=5),
                body="Payment processed successfully",
                severity_text="INFO",
                resources={
                    "service.name": "payment-service",
                    "deployment.environment": "staging",
                },
                attributes={
                    "transaction.id": "txn-123",
                    "amount": 99.99,
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "jsonl",
        "source": "logs",
    }

    # Export logs as JSONL
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"
    assert "attachment" in response.headers.get("Content-Disposition", "")

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 2, f"Expected 2 lines, got {len(jsonl_lines)}"

    # Verify each line is valid JSON
    json_objects = []
    for line in jsonl_lines:
        obj = json.loads(line)
        json_objects.append(obj)
        assert "id" in obj
        assert "timestamp" in obj
        assert "body" in obj
        assert "severity_text" in obj

    # Verify log bodies
    bodies = [obj.get("body") for obj in json_objects]
    assert "User logged in" in bodies
    assert "Payment processed successfully" in bodies


def test_export_logs_with_filter(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_logs: Callable[[List[Logs]], None],
) -> None:
    """
    Setup:
        Insert logs with different severity levels.

    Tests:
        1. Export logs with filter applied
        2. Verify only filtered logs are returned
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_logs(
        [
            Logs(
                timestamp=now - timedelta(seconds=10),
                body="Info message",
                severity_text="INFO",
                resources={
                    "service.name": "test-service",
                },
                attributes={},
            ),
            Logs(
                timestamp=now - timedelta(seconds=8),
                body="Error message",
                severity_text="ERROR",
                resources={
                    "service.name": "test-service",
                },
                attributes={},
            ),
            Logs(
                timestamp=now - timedelta(seconds=5),
                body="Another error message",
                severity_text="ERROR",
                resources={
                    "service.name": "test-service",
                },
                attributes={},
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "jsonl",
        "source": "logs",
        "filter": "severity_text = 'ERROR'",
    }

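    # The filter value is a query-builder expression: string literals are
    # single-quoted, and multiple conditions can be combined with AND (see
    # the complex-filter test below).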
    # Export logs with filter
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 2, f"Expected 2 lines (filtered), got {len(jsonl_lines)}"

    # Verify only ERROR logs are returned
    for line in jsonl_lines:
        obj = json.loads(line)
        assert obj["severity_text"] == "ERROR"
        assert "error message" in obj["body"].lower()


def test_export_logs_with_limit(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_logs: Callable[[List[Logs]], None],
) -> None:
    """
    Setup:
        Insert 5 logs.

    Tests:
        1. Export logs with limit applied
        2. Verify only the limited number of logs is returned
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    logs = []
    for i in range(5):
        logs.append(
            Logs(
                timestamp=now - timedelta(seconds=i),
                body=f"Log message {i}",
                severity_text="INFO",
                resources={
                    "service.name": "test-service",
                },
                attributes={
                    "index": i,
                },
            )
        )

    insert_logs(logs)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "csv",
        "source": "logs",
        "limit": 3,
    }

    # Export logs with limit
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "text/csv"

    # Parse CSV content
    csv_content = response.text
    csv_reader = csv.DictReader(io.StringIO(csv_content))

    rows = list(csv_reader)
    assert len(rows) == 3, f"Expected 3 rows (limited), got {len(rows)}"


def test_export_logs_with_columns(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_logs: Callable[[List[Logs]], None],
) -> None:
    """
    Setup:
        Insert logs with various attributes.

    Tests:
        1. Export logs with specific columns
        2. Verify only specified columns are returned
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_logs(
        [
            Logs(
                timestamp=now - timedelta(seconds=10),
                body="Test log message",
                severity_text="INFO",
                resources={
                    "service.name": "test-service",
                    "deployment.environment": "production",
                },
                attributes={
                    "http.method": "GET",
                    "http.status_code": 200,
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    # Request only specific columns
    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "csv",
        "source": "logs",
        "columns": ["timestamp", "severity_text", "body"],
    }

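    # urlencode(..., doseq=True) below expands the list value into repeated
    # query parameters (columns=timestamp&columns=severity_text&columns=body),
    # which is how the array-valued columns parameter is sent on a GET.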
    # Export logs with specific columns
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params, doseq=True)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "text/csv"

    # Parse CSV content
    csv_content = response.text
    csv_reader = csv.DictReader(io.StringIO(csv_content))

    rows = list(csv_reader)
    assert len(rows) == 1

    # Verify the specified columns are present
    row = rows[0]
    assert "timestamp" in row
    assert "severity_text" in row
    assert "body" in row
    assert row["severity_text"] == "INFO"
    assert row["body"] == "Test log message"


def test_export_logs_with_order_by(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_logs: Callable[[List[Logs]], None],
) -> None:
    """
    Setup:
        Insert logs at different timestamps.

    Tests:
        1. Export logs with ascending timestamp order
        2. Verify logs are returned in correct order
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_logs(
        [
            Logs(
                timestamp=now - timedelta(seconds=10),
                body="First log",
                severity_text="INFO",
                resources={
                    "service.name": "test-service",
                },
                attributes={},
            ),
            Logs(
                timestamp=now - timedelta(seconds=5),
                body="Second log",
                severity_text="INFO",
                resources={
                    "service.name": "test-service",
                },
                attributes={},
            ),
            Logs(
                timestamp=now - timedelta(seconds=1),
                body="Third log",
                severity_text="INFO",
                resources={
                    "service.name": "test-service",
                },
                attributes={},
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "jsonl",
        "source": "logs",
        "order_by": "timestamp:asc",
    }

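    # order_by takes "column:direction" form; "timestamp:asc" should return
    # the oldest log first, which the ordering assertions below rely on.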
    # Export logs with ascending order
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 3

    # Verify order - first log should be "First log" (oldest)
    json_objects = [json.loads(line) for line in jsonl_lines]
    assert json_objects[0]["body"] == "First log"
    assert json_objects[1]["body"] == "Second log"
    assert json_objects[2]["body"] == "Third log"


def test_export_logs_with_complex_filter(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_logs: Callable[[List[Logs]], None],
) -> None:
    """
    Setup:
        Insert logs with various service names and severity levels.

    Tests:
        1. Export logs with complex filter (multiple conditions)
        2. Verify only logs matching all conditions are returned
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_logs(
        [
            Logs(
                timestamp=now - timedelta(seconds=10),
                body="API error occurred",
                severity_text="ERROR",
                resources={
                    "service.name": "api-service",
                },
                attributes={},
            ),
            Logs(
                timestamp=now - timedelta(seconds=8),
                body="Worker info message",
                severity_text="INFO",
                resources={
                    "service.name": "worker-service",
                },
                attributes={},
            ),
            Logs(
                timestamp=now - timedelta(seconds=5),
                body="API info message",
                severity_text="INFO",
                resources={
                    "service.name": "api-service",
                },
                attributes={},
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    # Filter for api-service AND ERROR severity
    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "jsonl",
        "source": "logs",
        "filter": "service.name = 'api-service' AND severity_text = 'ERROR'",
    }

    # Export logs with complex filter
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert (
        len(jsonl_lines) == 1
    ), f"Expected 1 line (complex filter), got {len(jsonl_lines)}"

    # Verify the filtered log
    filtered_obj = json.loads(jsonl_lines[0])
    assert filtered_obj["body"] == "API error occurred"
    assert filtered_obj["severity_text"] == "ERROR"
@@ -1,782 +0,0 @@
import csv
import io
import json
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
from urllib.parse import urlencode

import requests

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.traces import TraceIdGenerator, Traces, TracesKind, TracesStatusCode


def test_export_traces_csv(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert 3 traces with different attributes.

    Tests:
        1. Export traces as CSV format
        2. Verify CSV structure and content
        3. Validate headers are present
        4. Check trace data is correctly formatted
    """
    http_service_trace_id = TraceIdGenerator.trace_id()
    http_service_span_id = TraceIdGenerator.span_id()
    http_service_db_span_id = TraceIdGenerator.span_id()
    topic_service_trace_id = TraceIdGenerator.trace_id()
    topic_service_span_id = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=4),
                duration=timedelta(seconds=3),
                trace_id=http_service_trace_id,
                span_id=http_service_span_id,
                parent_span_id="",
                name="POST /integration",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "http-service",
                    "os.type": "linux",
                    "host.name": "linux-000",
                },
                attributes={
                    "net.transport": "IP.TCP",
                    "http.scheme": "http",
                    "http.user_agent": "Integration Test",
                    "http.request.method": "POST",
                    "http.response.status_code": "200",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=3.5),
                duration=timedelta(seconds=0.5),
                trace_id=http_service_trace_id,
                span_id=http_service_db_span_id,
                parent_span_id=http_service_span_id,
                name="SELECT",
                kind=TracesKind.SPAN_KIND_CLIENT,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "http-service",
                    "os.type": "linux",
                    "host.name": "linux-000",
                },
                attributes={
                    "db.name": "integration",
                    "db.operation": "SELECT",
                    "db.statement": "SELECT * FROM integration",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=1),
                duration=timedelta(seconds=2),
                trace_id=topic_service_trace_id,
                span_id=topic_service_span_id,
                parent_span_id="",
                name="topic publish",
                kind=TracesKind.SPAN_KIND_PRODUCER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "topic-service",
                    "os.type": "linux",
                    "host.name": "linux-001",
                },
                attributes={
                    "message.type": "SENT",
                    "messaging.operation": "publish",
                    "messaging.message.id": "001",
                },
            ),
        ]
    )

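    # Three spans were inserted across two traces (two in the http-service
    # trace, one in the topic-service trace), so the export is expected to
    # yield three rows.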
    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "source": "traces",
        "limit": 1000,
    }

    # Export traces as CSV (GET for simple queries)
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=30,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "text/csv"
    assert "attachment" in response.headers.get("Content-Disposition", "")

    # Parse CSV content
    csv_content = response.text
    csv_reader = csv.DictReader(io.StringIO(csv_content))

    rows = list(csv_reader)
    assert len(rows) == 3, f"Expected 3 rows, got {len(rows)}"

    # Verify trace IDs are present in the exported data
    trace_ids = [row.get("trace_id") for row in rows]
    assert http_service_trace_id in trace_ids
    assert topic_service_trace_id in trace_ids

    # Verify span names are present
    span_names = [row.get("name") for row in rows]
    assert "POST /integration" in span_names
    assert "SELECT" in span_names
    assert "topic publish" in span_names


def test_export_traces_jsonl(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert 2 traces with different attributes.

    Tests:
        1. Export traces as JSONL format
        2. Verify JSONL structure and content
        3. Check each line is valid JSON
        4. Validate trace data is correctly formatted
    """
    http_service_trace_id = TraceIdGenerator.trace_id()
    http_service_span_id = TraceIdGenerator.span_id()
    topic_service_trace_id = TraceIdGenerator.trace_id()
    topic_service_span_id = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=4),
                duration=timedelta(seconds=3),
                trace_id=http_service_trace_id,
                span_id=http_service_span_id,
                parent_span_id="",
                name="POST /api/test",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "api-service",
                    "deployment.environment": "staging",
                },
                attributes={
                    "http.request.method": "POST",
                    "http.response.status_code": "201",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=2),
                duration=timedelta(seconds=1),
                trace_id=topic_service_trace_id,
                span_id=topic_service_span_id,
                parent_span_id="",
                name="queue.process",
                kind=TracesKind.SPAN_KIND_CONSUMER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "queue-service",
                    "deployment.environment": "staging",
                },
                attributes={
                    "messaging.operation": "process",
                    "messaging.system": "rabbitmq",
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "jsonl",
        "source": "traces",
        "limit": 1000,
    }

    # Export traces as JSONL (GET for simple queries)
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"
    assert "attachment" in response.headers.get("Content-Disposition", "")

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 2, f"Expected 2 lines, got {len(jsonl_lines)}"

    # Verify each line is valid JSON
    json_objects = []
    for line in jsonl_lines:
        obj = json.loads(line)
        json_objects.append(obj)
        assert "trace_id" in obj
        assert "span_id" in obj
        assert "name" in obj

    # Verify trace IDs are present
    trace_ids = [obj.get("trace_id") for obj in json_objects]
    assert http_service_trace_id in trace_ids
    assert topic_service_trace_id in trace_ids

    # Verify span names are present
    span_names = [obj.get("name") for obj in json_objects]
    assert "POST /api/test" in span_names
    assert "queue.process" in span_names


def test_export_traces_with_filter(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert traces with different service names.

    Tests:
        1. Export traces with filter applied
        2. Verify only filtered traces are returned
    """
    service_a_trace_id = TraceIdGenerator.trace_id()
    service_a_span_id = TraceIdGenerator.span_id()
    service_b_trace_id = TraceIdGenerator.trace_id()
    service_b_span_id = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=4),
                duration=timedelta(seconds=1),
                trace_id=service_a_trace_id,
                span_id=service_a_span_id,
                parent_span_id="",
                name="operation-a",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "service-a",
                },
                attributes={},
            ),
            Traces(
                timestamp=now - timedelta(seconds=2),
                duration=timedelta(seconds=1),
                trace_id=service_b_trace_id,
                span_id=service_b_span_id,
                parent_span_id="",
                name="operation-b",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "service-b",
                },
                attributes={},
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "jsonl",
        "source": "traces",
        "limit": 1000,
        "filter": "service.name = 'service-a'",
    }

    # Export traces with filter (GET supports filter param)
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 1, f"Expected 1 line (filtered), got {len(jsonl_lines)}"

    # Verify the filtered trace
    filtered_obj = json.loads(jsonl_lines[0])
    assert filtered_obj["trace_id"] == service_a_trace_id
    assert filtered_obj["name"] == "operation-a"


def test_export_traces_with_limit(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert 5 traces.

    Tests:
        1. Export traces with limit applied
        2. Verify only the limited number of traces is returned
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    traces = []
    for i in range(5):
        traces.append(
            Traces(
                timestamp=now - timedelta(seconds=i),
                duration=timedelta(seconds=1),
                trace_id=TraceIdGenerator.trace_id(),
                span_id=TraceIdGenerator.span_id(),
                parent_span_id="",
                name=f"operation-{i}",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "test-service",
                },
                attributes={},
            )
        )

    insert_traces(traces)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "csv",
        "source": "traces",
        "limit": 3,
    }

    # Export traces with limit (GET supports limit param)
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "text/csv"

    # Parse CSV content
    csv_content = response.text
    csv_reader = csv.DictReader(io.StringIO(csv_content))

    rows = list(csv_reader)
    assert len(rows) == 3, f"Expected 3 rows (limited), got {len(rows)}"


def test_export_traces_multiple_queries_rejected(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
) -> None:
    """
    Tests:
        1. POST with multiple builder queries but no trace operator is rejected
        2. Verify 400 error is returned
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    body = {
        "start": start_ns,
        "end": end_ns,
        "compositeQuery": {
            "queries": [
                {
                    "type": "builder_query",
                    "spec": {
                        "signal": "traces",
                        "name": "A",
                        "limit": 1000,
                        "filter": {"expression": "service.name = 'service-a'"},
                    },
                },
                {
                    "type": "builder_query",
                    "spec": {
                        "signal": "traces",
                        "name": "B",
                        "limit": 1000,
                        "filter": {"expression": "service.name = 'service-b'"},
                    },
                },
            ]
        },
    }

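    # Multiple builder queries are only meaningful on this endpoint when a
    # trace operator combines them (see the next test); without one, the
    # request is expected to fail validation.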
    url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
    response = requests.post(
        url,
        json=body,
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        },
    )

    assert response.status_code == HTTPStatus.BAD_REQUEST


def test_export_traces_with_composite_query_trace_operator(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert spans with parent-child relationships within a single trace.

    Tests:
        1. Export traces using trace operator in composite query (POST)
        2. Verify trace operator query works correctly
    """
    parent_trace_id = TraceIdGenerator.trace_id()
    parent_span_id = TraceIdGenerator.span_id()
    child_span_id_1 = TraceIdGenerator.span_id()
    child_span_id_2 = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=10),
                duration=timedelta(seconds=5),
                trace_id=parent_trace_id,
                span_id=parent_span_id,
                parent_span_id="",
                name="parent-operation",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "parent-service",
                },
                attributes={
                    "operation.type": "parent",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=9),
                duration=timedelta(seconds=2),
                trace_id=parent_trace_id,
                span_id=child_span_id_1,
                parent_span_id=parent_span_id,
                name="child-operation-1",
                kind=TracesKind.SPAN_KIND_INTERNAL,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "parent-service",
                },
                attributes={
                    "operation.type": "child",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=7),
                duration=timedelta(seconds=1),
                trace_id=parent_trace_id,
                span_id=child_span_id_2,
                parent_span_id=parent_span_id,
                name="child-operation-2",
                kind=TracesKind.SPAN_KIND_INTERNAL,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "parent-service",
                },
                attributes={
                    "operation.type": "child",
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    # A: spans with operation.type = 'parent'
    query_a = {
        "type": "builder_query",
        "spec": {
            "signal": "traces",
            "name": "A",
            "limit": 1000,
            "filter": {"expression": "operation.type = 'parent'"},
        },
    }

    # B: spans with operation.type = 'child'
    query_b = {
        "type": "builder_query",
        "spec": {
            "signal": "traces",
            "name": "B",
            "limit": 1000,
            "filter": {"expression": "operation.type = 'child'"},
        },
    }

    # Trace operator: find traces where A has a direct descendant B
    query_c = {
        "type": "builder_trace_operator",
        "spec": {
            "name": "C",
            "expression": "A => B",
            "returnSpansFrom": "A",
            "limit": 1000,
            "order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
        },
    }

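    # returnSpansFrom="A" means only the spans matched by query A (the
    # parent spans) are exported, even though B must also match for the
    # trace to qualify.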
    body = {
        "start": start_ns,
        "end": end_ns,
        "requestType": "raw",
        "compositeQuery": {
            "queries": [query_a, query_b, query_c],
        },
    }

    url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
    response = requests.post(
        url,
        json=body,
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        },
    )

    print(response.text)
    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 1, f"Expected 1 line, got {len(jsonl_lines)}"

    # Verify all returned spans belong to the matched trace
    json_objects = [json.loads(line) for line in jsonl_lines]
    trace_ids = [obj.get("trace_id") for obj in json_objects]
    assert all(tid == parent_trace_id for tid in trace_ids)

    # Verify the parent span (returnSpansFrom = "A") is present
    span_names = [obj.get("name") for obj in json_objects]
    assert "parent-operation" in span_names


def test_export_traces_with_select_fields(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert traces with various attributes.

    Tests:
        1. Export traces with specific select fields via POST
        2. Verify only specified fields are returned in the output
    """
    trace_id = TraceIdGenerator.trace_id()
    span_id = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=10),
                duration=timedelta(seconds=2),
                trace_id=trace_id,
                span_id=span_id,
                parent_span_id="",
                name="test-operation",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "test-service",
                    "deployment.environment": "production",
                    "host.name": "server-01",
                },
                attributes={
                    "http.method": "POST",
                    "http.status_code": "201",
                    "user.id": "user123",
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    body = {
        "start": start_ns,
        "end": end_ns,
        "requestType": "raw",
        "compositeQuery": {
            "queries": [
                {
                    "type": "builder_query",
                    "spec": {
                        "signal": "traces",
                        "name": "A",
                        "limit": 1000,
                        "selectFields": [
                            {
                                "name": "trace_id",
                                "fieldDataType": "string",
                                "fieldContext": "span",
                                "signal": "traces",
                            },
                            {
                                "name": "span_id",
                                "fieldDataType": "string",
                                "fieldContext": "span",
                                "signal": "traces",
                            },
                            {
                                "name": "name",
                                "fieldDataType": "string",
                                "fieldContext": "span",
                                "signal": "traces",
                            },
                            {
                                "name": "service.name",
                                "fieldDataType": "string",
                                "fieldContext": "resource",
                                "signal": "traces",
                            },
                        ],
                    },
                }
            ]
        },
    }

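    # Each selectFields entry names a column plus its fieldContext: "span"
    # for intrinsic span columns, "resource" for resource attributes such as
    # service.name (matching how the data was inserted above).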
    url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
    response = requests.post(
        url,
        json=body,
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 1

    # Verify the selected fields are present
    result = json.loads(jsonl_lines[0])
    assert "trace_id" in result
    assert "span_id" in result
    assert "name" in result

    # Verify values
    assert result["trace_id"] == trace_id
    assert result["span_id"] == span_id
    assert result["name"] == "test-operation"