Mirror of https://github.com/SigNoz/signoz.git (synced 2026-02-19 07:22:29 +00:00)

Compare commits: nv/6703 ... tvats-expo (6 commits)

Commits: b8c101e95a, 85c9daab12, 4a80ef8f43, ac8600f5da, b2655634fe, b2f20f7a64
.vscode/settings.json (vendored): 4 changes

@@ -17,5 +17,7 @@
},
"[html]": {
"editor.defaultFormatter": "vscode.html-language-features"
}
},
"python-envs.defaultEnvManager": "ms-python.python:system",
"python-envs.pythonProjects": []
}
@@ -2521,6 +2521,149 @@ paths:
      summary: Update auth domain
      tags:
        - authdomains
  /api/v1/export_raw_data:
    get:
      deprecated: false
      description: This endpoint allows simple queries for exporting raw data for traces and logs
      operationId: HandleExportRawDataGET
      parameters:
        - in: query
          name: format
          schema:
            default: csv
            enum:
              - csv
              - jsonl
            type: string
        - in: query
          name: source
          schema:
            default: logs
            enum:
              - logs
              - traces
            type: string
        - in: query
          name: start
          schema:
            minimum: 0
            type: integer
        - in: query
          name: end
          schema:
            minimum: 0
            type: integer
        - in: query
          name: limit
          schema:
            default: 10000
            maximum: 50000
            minimum: 1
            type: integer
        - in: query
          name: filter
          schema:
            type: string
        - in: query
          name: columns
          schema:
            items:
              type: string
            type: array
        - in: query
          name: order_by
          schema:
            type: string
      responses:
        "200":
          content:
            application/json:
              schema:
                type: string
          description: OK
        "400":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Bad Request
        "500":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Internal Server Error
      summary: Export raw data
      tags:
        - logs
        - traces
    post:
      deprecated: false
      description: This endpoint allows complex queries for exporting raw data for traces and logs
      operationId: HandleExportRawDataPOST
      parameters:
        - in: query
          name: format
          schema:
            default: csv
            enum:
              - csv
              - jsonl
            type: string
      requestBody:
        content:
          application/json:
            schema:
              description: Request body for the v5 query range endpoint. Supports builder queries (traces, logs, metrics), formulas, joins, trace operators, PromQL, and ClickHouse SQL queries.
              properties:
                compositeQuery:
                  $ref: '#/components/schemas/Querybuildertypesv5CompositeQuery'
                end:
                  minimum: 0
                  type: integer
                formatOptions:
                  $ref: '#/components/schemas/Querybuildertypesv5FormatOptions'
                noCache:
                  type: boolean
                requestType:
                  $ref: '#/components/schemas/Querybuildertypesv5RequestType'
                schemaVersion:
                  type: string
                start:
                  minimum: 0
                  type: integer
                variables:
                  additionalProperties:
                    $ref: '#/components/schemas/Querybuildertypesv5VariableItem'
                  type: object
              type: object
      responses:
        "200":
          content:
            application/json:
              schema:
                type: string
          description: OK
        "400":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Bad Request
        "500":
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/RenderErrorResponse'
          description: Internal Server Error
      summary: Export raw data
      tags:
        - logs
        - traces
  /api/v1/fields/keys:
    get:
      deprecated: false
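For orientation, a minimal sketch of how a client could call the new GET endpoint described above. The base URL and the SIGNOZ-API-KEY header are assumptions for illustration, not part of this change.

package main

import (
    "fmt"
    "io"
    "net/http"
    "net/url"
)

func main() {
    // Build the query parameters accepted by GET /api/v1/export_raw_data.
    q := url.Values{}
    q.Set("source", "logs")
    q.Set("format", "jsonl")
    q.Set("start", "1693612800000000000") // Unix nanoseconds
    q.Set("end", "1693699199000000000")
    q.Set("limit", "1000")

    req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/api/v1/export_raw_data?"+q.Encode(), nil)
    if err != nil {
        panic(err)
    }
    req.Header.Set("SIGNOZ-API-KEY", "<api-key>") // assumed auth header

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    // The response streams the exported rows; here we only report the size.
    body, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.Status, len(body), "bytes")
}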
@@ -18,6 +18,7 @@ import (
    "github.com/SigNoz/signoz/pkg/modules/organization"
    "github.com/SigNoz/signoz/pkg/modules/preference"
    "github.com/SigNoz/signoz/pkg/modules/promote"
    "github.com/SigNoz/signoz/pkg/modules/rawdataexport"
    "github.com/SigNoz/signoz/pkg/modules/session"
    "github.com/SigNoz/signoz/pkg/modules/user"
    "github.com/SigNoz/signoz/pkg/types"
@@ -44,6 +45,7 @@ type provider struct {
    gatewayHandler       gateway.Handler
    fieldsHandler        fields.Handler
    authzHandler         authz.Handler
    rawDataExportHandler rawdataexport.Handler
}

func NewFactory(
@@ -63,6 +65,7 @@ func NewFactory(
    gatewayHandler gateway.Handler,
    fieldsHandler fields.Handler,
    authzHandler authz.Handler,
    rawDataExportHandler rawdataexport.Handler,
) factory.ProviderFactory[apiserver.APIServer, apiserver.Config] {
    return factory.NewProviderFactory(factory.MustNewName("signoz"), func(ctx context.Context, providerSettings factory.ProviderSettings, config apiserver.Config) (apiserver.APIServer, error) {
        return newProvider(
@@ -85,6 +88,7 @@ func NewFactory(
            gatewayHandler,
            fieldsHandler,
            authzHandler,
            rawDataExportHandler,
        )
    })
}
@@ -109,6 +113,7 @@ func newProvider(
    gatewayHandler gateway.Handler,
    fieldsHandler fields.Handler,
    authzHandler authz.Handler,
    rawDataExportHandler rawdataexport.Handler,
) (apiserver.APIServer, error) {
    settings := factory.NewScopedProviderSettings(providerSettings, "github.com/SigNoz/signoz/pkg/apiserver/signozapiserver")
    router := mux.NewRouter().UseEncodedPath()
@@ -131,6 +136,7 @@ func newProvider(
        gatewayHandler:       gatewayHandler,
        fieldsHandler:        fieldsHandler,
        authzHandler:         authzHandler,
        rawDataExportHandler: rawDataExportHandler,
    }

    provider.authZ = middleware.NewAuthZ(settings.Logger(), orgGetter, authz)
@@ -199,6 +205,10 @@ func (provider *provider) AddToRouter(router *mux.Router) error {
        return err
    }

    if err := provider.addRawDataExportRoutes(router); err != nil {
        return err
    }

    return nil
}
pkg/apiserver/signozapiserver/rawDataExport.go (new file, 46 lines)

@@ -0,0 +1,46 @@
package signozapiserver

import (
    "net/http"

    "github.com/SigNoz/signoz/pkg/http/handler"
    "github.com/SigNoz/signoz/pkg/types"
    v5 "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/gorilla/mux"
)

func (provider *provider) addRawDataExportRoutes(router *mux.Router) error {
    if err := router.Handle("/api/v1/export_raw_data", handler.New(provider.authZ.ViewAccess(provider.rawDataExportHandler.ExportRawData), handler.OpenAPIDef{
        ID:                  "HandleExportRawDataGET",
        Tags:                []string{"logs", "traces"},
        Summary:             "Export raw data",
        Description:         "This endpoint allows simple queries for exporting raw data for traces and logs",
        Request:             new(types.ExportRawDataQueryParams),
        RequestContentType:  "application/json",
        Response:            nil,
        ResponseContentType: "application/json",
        SuccessStatusCode:   http.StatusOK,
        ErrorStatusCodes:    []int{http.StatusBadRequest},
    })).Methods(http.MethodGet).GetError(); err != nil {
        return err
    }
    if err := router.Handle("/api/v1/export_raw_data", handler.New(provider.authZ.ViewAccess(provider.rawDataExportHandler.ExportRawData), handler.OpenAPIDef{
        ID:          "HandleExportRawDataPOST",
        Tags:        []string{"logs", "traces"},
        Summary:     "Export raw data",
        Description: "This endpoint allows complex queries for exporting raw data for traces and logs",
        Request: new(struct {
            v5.QueryRangeRequest
            types.ExportRawDataFormatQueryParam
        }),
        RequestContentType:  "application/json",
        Response:            nil,
        ResponseContentType: "application/json",
        SuccessStatusCode:   http.StatusOK,
        ErrorStatusCodes:    []int{http.StatusBadRequest},
    })).Methods(http.MethodPost).GetError(); err != nil {
        return err
    }

    return nil
}
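As a complement to the GET sketch earlier, a hedged sketch of calling the POST variant registered above. The JSON field names follow the QueryRangeRequest shape in the OpenAPI schema earlier in this diff; the exact builder-query spec payload, base URL, and auth header are assumptions for illustration.

package main

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
)

func main() {
    // A minimal raw-export request body; the spec payload is illustrative only.
    body := []byte(`{
        "start": 1693612800000000000,
        "end": 1693699199000000000,
        "requestType": "raw",
        "compositeQuery": {
            "queries": [
                {"type": "builder_query", "spec": {"signal": "traces", "limit": 1000}}
            ]
        }
    }`)

    req, err := http.NewRequest(http.MethodPost,
        "http://localhost:8080/api/v1/export_raw_data?format=csv", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("SIGNOZ-API-KEY", "<api-key>") // assumed auth header

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    out, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.Status, len(out), "bytes")
}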
@@ -6,17 +6,18 @@ import (
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strconv"
    "strings"
    "sync"
    "time"
    "unicode"
    "unicode/utf8"

    "github.com/SigNoz/signoz/pkg/errors"
    "github.com/SigNoz/signoz/pkg/http/binding"
    "github.com/SigNoz/signoz/pkg/http/render"
    "github.com/SigNoz/signoz/pkg/modules/rawdataexport"
    "github.com/SigNoz/signoz/pkg/telemetrylogs"
    "github.com/SigNoz/signoz/pkg/types"
    "github.com/SigNoz/signoz/pkg/types/authtypes"
    qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
    "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
@@ -34,31 +35,27 @@ func NewHandler(module rawdataexport.Module) rawdataexport.Handler {
// ExportRawData handles data export requests.
//
// API Documentation:
// Endpoint: GET /api/v1/export_raw_data
// Endpoint: GET /api/v1/export_raw_data - logs and traces only (simple queries via query params)
// Endpoint: POST /api/v1/export_raw_data - logs, traces, and trace operator (full QueryRangeRequest in JSON body)
//
// Query Parameters:
// GET: Converts query params to QueryRangeRequest and delegates to common export logic. No composite_query/trace operator.
//
// - source (optional): Type of data to export ["logs" (default), "metrics", "traces"]
// Note: Currently only "logs" is fully supported
// GET Query Parameters:
//
// - source (optional): ["logs" (default) or "traces"] - metrics not supported
// - format (optional): Output format ["csv" (default), "jsonl"]
// - start (required): Start time (Unix timestamp in nanoseconds)
// - end (required): End time (Unix timestamp in nanoseconds)
// - limit (optional): Max rows to export (cannot exceed MAX_EXPORT_ROW_COUNT_LIMIT)
// - filter (optional): Filter expression
// - columns (optional): Columns to include
// - order_by (optional): Sorting ["column:direction"]
//
// - start (required): Start time for query (Unix timestamp in nanoseconds)
// POST Request Body (QueryRangeRequest):
//
// - end (required): End time for query (Unix timestamp in nanoseconds)
//
// - limit (optional): Maximum number of rows to export
// Constraints: Must be positive and cannot exceed MAX_EXPORT_ROW_COUNT_LIMIT
//
// - filter (optional): Filter expression to apply to the query
//
// - columns (optional): Specific columns to include in export
// Default: all columns are returned
// Format: ["context.field:type", "context.field", "field"]
//
// - order_by (optional): Sorting specification ["column:direction" or "context.field:type:direction"]
// Direction: "asc" or "desc"
// Default: ["timestamp:desc", "id:desc"]
// - Accepts a full QueryRangeRequest JSON body with composite_query containing QueryEnvelope array
// - Supports builder_query and builder_trace_operator types
// - format query param still controls output format
//
// Response Headers:
// - Content-Type: "text/csv" or "application/x-ndjson"
@@ -86,75 +83,45 @@ func NewHandler(module rawdataexport.Module) rawdataexport.Handler {
|
||||
// Export with filter and ordering:
|
||||
// GET /api/v1/export_raw_data?start=1693612800000000000&end=1693699199000000000
|
||||
// &filter=severity="error"&order_by=timestamp:desc&limit=1000
|
||||
//
|
||||
// Export with composite query (POST only):
|
||||
// POST /api/v1/export_raw_data?format=csv
|
||||
// Body: {"start":1693612800000000000,"end":1693699199000000000,"composite_query":{"queries":[...]}}
|
||||
func (handler *handler) ExportRawData(rw http.ResponseWriter, r *http.Request) {
|
||||
source, err := getExportQuerySource(r.URL.Query())
|
||||
if err != nil {
|
||||
var queryRangeRequest qbtypes.QueryRangeRequest
|
||||
var format string
|
||||
|
||||
if r.Method == http.MethodGet {
|
||||
var params types.ExportRawDataQueryParams
|
||||
if err := binding.Query.BindQuery(r.URL.Query(), ¶ms); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
format = params.Format
|
||||
var err error
|
||||
queryRangeRequest, err = buildQueryRangeRequest(¶ms)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
var formatParam types.ExportRawDataFormatQueryParam
|
||||
if err := binding.Query.BindQuery(r.URL.Query(), &formatParam); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
format = formatParam.Format
|
||||
if err := json.NewDecoder(r.Body).Decode(&queryRangeRequest); err != nil {
|
||||
render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid request body: %v", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := validateAndApplyExportLimits(&queryRangeRequest); err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
switch source {
|
||||
case "logs":
|
||||
handler.exportLogs(rw, r)
|
||||
case "traces":
|
||||
handler.exportTraces(rw, r)
|
||||
case "metrics":
|
||||
handler.exportMetrics(rw, r)
|
||||
default:
|
||||
render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid source: must be logs"))
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *handler) exportMetrics(rw http.ResponseWriter, r *http.Request) {
|
||||
render.Error(rw, errors.Newf(errors.TypeUnsupported, errors.CodeUnsupported, "metrics export is not yet supported"))
|
||||
}
|
||||
|
||||
func (handler *handler) exportTraces(rw http.ResponseWriter, r *http.Request) {
|
||||
render.Error(rw, errors.Newf(errors.TypeUnsupported, errors.CodeUnsupported, "traces export is not yet supported"))
|
||||
}
|
||||
|
||||
func (handler *handler) exportLogs(rw http.ResponseWriter, r *http.Request) {
|
||||
// Set up response headers
|
||||
rw.Header().Set("Cache-Control", "no-cache")
|
||||
rw.Header().Set("Vary", "Accept-Encoding") // Indicate that response varies based on Accept-Encoding
|
||||
rw.Header().Set("Access-Control-Expose-Headers", "Content-Disposition, X-Response-Complete")
|
||||
rw.Header().Set("Trailer", "X-Response-Complete")
|
||||
rw.Header().Set("Transfer-Encoding", "chunked")
|
||||
|
||||
queryParams := r.URL.Query()
|
||||
|
||||
startTime, endTime, err := getExportQueryTimeRange(queryParams)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
limit, err := getExportQueryLimit(queryParams)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
format, err := getExportQueryFormat(queryParams)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Set appropriate content type and filename
|
||||
filename := fmt.Sprintf("data_exported_%s.%s", time.Now().Format("2006-01-02_150405"), format)
|
||||
rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
||||
|
||||
filterExpression := queryParams.Get("filter")
|
||||
|
||||
orderByExpression, err := getExportQueryOrderBy(queryParams)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
|
||||
columns := getExportQueryColumns(queryParams)
|
||||
|
||||
claims, err := authtypes.ClaimsFromContext(r.Context())
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
@@ -167,70 +134,145 @@ func (handler *handler) exportLogs(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
queryRangeRequest := qbtypes.QueryRangeRequest{
|
||||
Start: startTime,
|
||||
End: endTime,
|
||||
RequestType: qbtypes.RequestTypeRaw,
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{
|
||||
{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
setExportResponseHeaders(rw, format)
|
||||
|
||||
spec := qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
|
||||
Signal: telemetrytypes.SignalLogs,
|
||||
Name: "raw",
|
||||
Filter: &qbtypes.Filter{
|
||||
Expression: filterExpression,
|
||||
},
|
||||
Limit: limit,
|
||||
Order: orderByExpression,
|
||||
}
|
||||
|
||||
spec.SelectFields = columns
|
||||
|
||||
queryRangeRequest.CompositeQuery.Queries[0].Spec = spec
|
||||
|
||||
// This will signal Export module to stop sending data
|
||||
doneChan := make(chan any)
|
||||
defer close(doneChan)
|
||||
rowChan, errChan := handler.module.ExportRawData(r.Context(), orgID, &queryRangeRequest, doneChan)
|
||||
|
||||
var isComplete bool
|
||||
isComplete, err := handler.executeExport(rowChan, errChan, format, rw)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
rw.Header().Set("X-Response-Complete", strconv.FormatBool(isComplete))
|
||||
}
|
||||
|
||||
// validateAndApplyExportLimits validates query types and applies default/max limits to all queries.
|
||||
func validateAndApplyExportLimits(req *qbtypes.QueryRangeRequest) error {
|
||||
isTraceOperatorQueryPresent := false
|
||||
|
||||
// Check if the trace operator query is present
|
||||
queries := req.CompositeQuery.Queries
|
||||
for idx := range len(queries) {
|
||||
if _, ok := queries[idx].Spec.(qbtypes.QueryBuilderTraceOperator); ok {
|
||||
isTraceOperatorQueryPresent = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If the trace operator query is not present, and there are multiple queries, return an error
|
||||
if !isTraceOperatorQueryPresent && len(queries) > 1 {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "multiple queries not allowed without a trace operator query")
|
||||
}
|
||||
|
||||
for idx := range queries {
|
||||
switch spec := queries[idx].Spec.(type) {
|
||||
case qbtypes.QueryBuilderQuery[qbtypes.LogAggregation],
|
||||
qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation],
|
||||
qbtypes.QueryBuilderTraceOperator:
|
||||
limit := queries[idx].GetLimit()
|
||||
if limit == 0 {
|
||||
limit = DefaultExportRowCountLimit
|
||||
} else if limit < 0 {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be positive")
|
||||
} else if limit > MaxExportRowCountLimit {
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit cannot be more than %d", MaxExportRowCountLimit)
|
||||
}
|
||||
queries[idx].SetLimit(limit)
|
||||
default:
|
||||
return errors.NewInvalidInputf(errors.CodeInvalidInput, "unsupported query type: %T", spec)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
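For intuition, a worked example of the limit rules above, using the bounds already stated in the OpenAPI spec earlier in this diff (default 10000, maximum 50000): a query with Limit 0 is bumped to 10000, Limit 1000 is kept as-is, Limit -1 is rejected as non-positive, and Limit 60000 is rejected for exceeding the maximum.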
|
||||
|
||||
// buildQueryRangeRequest builds a QueryRangeRequest from already-bound and validated GET query params.
|
||||
func buildQueryRangeRequest(params *types.ExportRawDataQueryParams) (qbtypes.QueryRangeRequest, error) {
|
||||
orderBy, err := parseExportQueryOrderBy(params.OrderBy)
|
||||
if err != nil {
|
||||
return qbtypes.QueryRangeRequest{}, err
|
||||
}
|
||||
|
||||
columns := parseExportQueryColumns(params.Columns)
|
||||
|
||||
var filter *qbtypes.Filter
|
||||
if params.Filter != "" {
|
||||
filter = &qbtypes.Filter{Expression: params.Filter}
|
||||
}
|
||||
|
||||
var query qbtypes.QueryEnvelope
|
||||
switch params.Source {
|
||||
case "logs":
|
||||
query = qbtypes.QueryEnvelope{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{
|
||||
Signal: telemetrytypes.SignalLogs,
|
||||
Filter: filter,
|
||||
Limit: params.Limit,
|
||||
Order: orderBy,
|
||||
SelectFields: columns,
|
||||
},
|
||||
}
|
||||
case "traces":
|
||||
query = qbtypes.QueryEnvelope{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
|
||||
Signal: telemetrytypes.SignalTraces,
|
||||
Filter: filter,
|
||||
Limit: params.Limit,
|
||||
Order: orderBy,
|
||||
SelectFields: columns,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
return qbtypes.QueryRangeRequest{
|
||||
Start: params.Start,
|
||||
End: params.End,
|
||||
RequestType: qbtypes.RequestTypeRaw,
|
||||
CompositeQuery: qbtypes.CompositeQuery{
|
||||
Queries: []qbtypes.QueryEnvelope{query},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// setExportResponseHeaders sets common HTTP headers for export responses.
|
||||
func setExportResponseHeaders(rw http.ResponseWriter, format string) {
|
||||
rw.Header().Set("Cache-Control", "no-cache")
|
||||
rw.Header().Set("Vary", "Accept-Encoding")
|
||||
rw.Header().Set("Access-Control-Expose-Headers", "Content-Disposition, X-Response-Complete")
|
||||
rw.Header().Set("Trailer", "X-Response-Complete")
|
||||
rw.Header().Set("Transfer-Encoding", "chunked")
|
||||
filename := fmt.Sprintf("data_exported_%s.%s", time.Now().Format("2006-01-02_150405"), format)
|
||||
rw.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
|
||||
}
|
||||
|
||||
// executeExport streams data from rowChan to the response writer in the specified format.
|
||||
func (handler *handler) executeExport(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, format string, rw http.ResponseWriter) (bool, error) {
|
||||
switch format {
|
||||
case "csv", "":
|
||||
rw.Header().Set("Content-Type", "text/csv")
|
||||
csvWriter := csv.NewWriter(rw)
|
||||
isComplete, err = handler.exportLogsCSV(rowChan, errChan, csvWriter)
|
||||
isComplete, err := handler.exportRawDataCSV(rowChan, errChan, csvWriter)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
return false, err
|
||||
}
|
||||
csvWriter.Flush()
|
||||
return isComplete, nil
|
||||
case "jsonl":
|
||||
rw.Header().Set("Content-Type", "application/x-ndjson")
|
||||
isComplete, err = handler.exportLogsJSONL(rowChan, errChan, rw)
|
||||
if err != nil {
|
||||
render.Error(rw, err)
|
||||
return
|
||||
}
|
||||
return handler.exportRawDataJSONL(rowChan, errChan, rw)
|
||||
default:
|
||||
render.Error(rw, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid format: must be csv or jsonl"))
|
||||
return
|
||||
return false, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid format: must be csv or jsonl")
|
||||
}
|
||||
|
||||
rw.Header().Set("X-Response-Complete", strconv.FormatBool(isComplete))
|
||||
}
|
||||
|
||||
func (handler *handler) exportLogsCSV(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, csvWriter *csv.Writer) (bool, error) {
|
||||
var header []string
|
||||
// exportRawDataCSV is a generic CSV export function that works with any raw data (logs, traces, etc.)
|
||||
func (handler *handler) exportRawDataCSV(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, csvWriter *csv.Writer) (bool, error) {
|
||||
|
||||
headerToIndexMapping := make(map[string]int, len(header))
|
||||
headerToIndexMapping := make(map[string]int)
|
||||
var once sync.Once
|
||||
|
||||
totalBytes := uint64(0)
|
||||
for {
|
||||
@@ -239,18 +281,14 @@ func (handler *handler) exportLogsCSV(rowChan <-chan *qbtypes.RawRow, errChan <-
|
||||
if !ok {
|
||||
return true, nil
|
||||
}
|
||||
if header == nil {
|
||||
// Initialize and write header for CSV
|
||||
header = constructCSVHeaderFromQueryResponse(row.Data)
|
||||
|
||||
if err := csvWriter.Write(header); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
once.Do(func() {
|
||||
header := constructCSVHeaderFromQueryResponse(row.Data)
|
||||
_ = csvWriter.Write(header)
|
||||
// We ignore the error here, as it will get caught in the next iteration
|
||||
for i, col := range header {
|
||||
headerToIndexMapping[col] = i
|
||||
}
|
||||
}
|
||||
})
|
||||
record := constructCSVRecordFromQueryResponse(row.Data, headerToIndexMapping)
|
||||
if err := csvWriter.Write(record); err != nil {
|
||||
return false, err
|
||||
@@ -268,8 +306,8 @@ func (handler *handler) exportLogsCSV(rowChan <-chan *qbtypes.RawRow, errChan <-
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *handler) exportLogsJSONL(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, writer io.Writer) (bool, error) {
|
||||
|
||||
// exportRawDataJSONL is a generic JSONL export function that works with any raw data (logs, traces, etc.)
|
||||
func (handler *handler) exportRawDataJSONL(rowChan <-chan *qbtypes.RawRow, errChan <-chan error, writer io.Writer) (bool, error) {
|
||||
totalBytes := uint64(0)
|
||||
for {
|
||||
select {
|
||||
@@ -277,9 +315,11 @@ func (handler *handler) exportLogsJSONL(rowChan <-chan *qbtypes.RawRow, errChan
|
||||
if !ok {
|
||||
return true, nil
|
||||
}
|
||||
// Handle JSON format (JSONL - one object per line)
|
||||
jsonBytes, _ := json.Marshal(row.Data)
|
||||
totalBytes += uint64(len(jsonBytes)) + 1 // +1 for newline
|
||||
jsonBytes, err := json.Marshal(row.Data)
|
||||
if err != nil {
|
||||
return false, errors.NewUnexpectedf(errors.CodeInternal, "error marshaling JSON: %s", err)
|
||||
}
|
||||
totalBytes += uint64(len(jsonBytes)) + 1
|
||||
|
||||
if _, err := writer.Write(jsonBytes); err != nil {
|
||||
return false, errors.NewUnexpectedf(errors.CodeInternal, "error writing JSON: %s", err)
|
||||
@@ -299,69 +339,6 @@ func (handler *handler) exportLogsJSONL(rowChan <-chan *qbtypes.RawRow, errChan
|
||||
}
|
||||
}
|
||||
|
||||
func getExportQuerySource(queryParams url.Values) (string, error) {
|
||||
switch queryParams.Get("source") {
|
||||
case "logs", "":
|
||||
return "logs", nil
|
||||
case "metrics":
|
||||
return "metrics", errors.NewInvalidInputf(errors.CodeInvalidInput, "metrics export not yet supported")
|
||||
case "traces":
|
||||
return "traces", errors.NewInvalidInputf(errors.CodeInvalidInput, "traces export not yet supported")
|
||||
default:
|
||||
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid source: must be logs, metrics or traces")
|
||||
}
|
||||
}
|
||||
|
||||
func getExportQueryFormat(queryParams url.Values) (string, error) {
|
||||
switch queryParams.Get("format") {
|
||||
case "csv", "":
|
||||
return "csv", nil
|
||||
case "jsonl":
|
||||
return "jsonl", nil
|
||||
default:
|
||||
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid format: must be csv or jsonl")
|
||||
}
|
||||
}
|
||||
|
||||
func getExportQueryLimit(queryParams url.Values) (int, error) {
|
||||
|
||||
limitStr := queryParams.Get("limit")
|
||||
if limitStr == "" {
|
||||
return DefaultExportRowCountLimit, nil
|
||||
} else {
|
||||
limit, err := strconv.Atoi(limitStr)
|
||||
if err != nil {
|
||||
return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid limit format: %s", err.Error())
|
||||
}
|
||||
if limit <= 0 {
|
||||
return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be positive")
|
||||
}
|
||||
if limit > MaxExportRowCountLimit {
|
||||
return 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "limit cannot be more than %d", MaxExportRowCountLimit)
|
||||
}
|
||||
return limit, nil
|
||||
}
|
||||
}
|
||||
|
||||
func getExportQueryTimeRange(queryParams url.Values) (uint64, uint64, error) {
|
||||
|
||||
startTimeStr := queryParams.Get("start")
|
||||
endTimeStr := queryParams.Get("end")
|
||||
|
||||
if startTimeStr == "" || endTimeStr == "" {
|
||||
return 0, 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "start and end time are required")
|
||||
}
|
||||
startTime, err := strconv.ParseUint(startTimeStr, 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid start time format: %s", err.Error())
|
||||
}
|
||||
endTime, err := strconv.ParseUint(endTimeStr, 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid end time format: %s", err.Error())
|
||||
}
|
||||
return startTime, endTime, nil
|
||||
}
|
||||
|
||||
func constructCSVHeaderFromQueryResponse(data map[string]any) []string {
|
||||
header := make([]string, 0, len(data))
|
||||
for key := range data {
|
||||
@@ -427,9 +404,12 @@ func constructCSVRecordFromQueryResponse(data map[string]any, headerToIndexMappi
|
||||
valueStr = v.String()
|
||||
|
||||
default:
|
||||
// For all other complex types (maps, structs, etc.)
|
||||
jsonBytes, _ := json.Marshal(v)
|
||||
valueStr = string(jsonBytes)
|
||||
jsonBytes, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
valueStr = fmt.Sprintf("%v", v)
|
||||
} else {
|
||||
valueStr = string(jsonBytes)
|
||||
}
|
||||
}
|
||||
|
||||
record[index] = sanitizeForCSV(valueStr)
|
||||
@@ -438,23 +418,17 @@ func constructCSVRecordFromQueryResponse(data map[string]any, headerToIndexMappi
|
||||
return record
|
||||
}
|
||||
|
||||
// getExportQueryColumns parses the "columns" query parameters and returns a slice of TelemetryFieldKey structs.
|
||||
// Each column should be a valid telemetry field key in the format "context.field:type" or "context.field" or "field"
|
||||
func getExportQueryColumns(queryParams url.Values) []telemetrytypes.TelemetryFieldKey {
|
||||
columnParams := queryParams["columns"]
|
||||
|
||||
// parseExportQueryColumns converts bound column strings to TelemetryFieldKey structs.
|
||||
// Each column should be in the format "context.field:type" or "context.field" or "field"
|
||||
func parseExportQueryColumns(columnParams []string) []telemetrytypes.TelemetryFieldKey {
|
||||
columns := make([]telemetrytypes.TelemetryFieldKey, 0, len(columnParams))
|
||||
|
||||
for _, columnStr := range columnParams {
|
||||
// Skip empty strings
|
||||
columnStr = strings.TrimSpace(columnStr)
|
||||
if columnStr == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
columns = append(columns, telemetrytypes.GetFieldKeyFromKeyText(columnStr))
|
||||
}
|
||||
|
||||
return columns
|
||||
}
|
||||
|
||||
@@ -466,51 +440,24 @@ func getsizeOfStringSlice(slice []string) uint64 {
|
||||
return totalBytes
|
||||
}
|
||||
|
||||
// getExportQueryOrderBy parses the "order_by" query parameters and returns a slice of OrderBy structs.
|
||||
// Each "order_by" parameter should be in the format "column:direction"
|
||||
// Each "column" should be a valid telemetry field key in the format "context.field:type" or "context.field" or "field"
|
||||
func getExportQueryOrderBy(queryParams url.Values) ([]qbtypes.OrderBy, error) {
|
||||
orderByParam := queryParams.Get("order_by")
|
||||
|
||||
// parseExportQueryOrderBy converts a bound order_by string to an OrderBy slice.
|
||||
// The string should be in the format "column:direction" and is assumed already validated.
|
||||
func parseExportQueryOrderBy(orderByParam string) ([]qbtypes.OrderBy, error) {
|
||||
orderByParam = strings.TrimSpace(orderByParam)
|
||||
if orderByParam == "" {
|
||||
return telemetrylogs.DefaultLogsV2SortingOrder, nil
|
||||
return []qbtypes.OrderBy{}, nil
|
||||
}
|
||||
|
||||
parts := strings.Split(orderByParam, ":")
|
||||
if len(parts) != 2 && len(parts) != 3 {
|
||||
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order_by format: %s, should be <column>:<direction>", orderByParam)
|
||||
}
|
||||
|
||||
column := strings.Join(parts[:len(parts)-1], ":")
|
||||
direction := parts[len(parts)-1]
|
||||
|
||||
orderDirection, ok := qbtypes.OrderDirectionMap[direction]
|
||||
if !ok {
|
||||
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order_by direction: %s, should be one of %s, %s", direction, qbtypes.OrderDirectionAsc, qbtypes.OrderDirectionDesc)
|
||||
}
|
||||
|
||||
orderByKey := telemetrytypes.GetFieldKeyFromKeyText(column)
|
||||
|
||||
orderBy := []qbtypes.OrderBy{
|
||||
return []qbtypes.OrderBy{
|
||||
{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: orderByKey,
|
||||
TelemetryFieldKey: telemetrytypes.GetFieldKeyFromKeyText(column),
|
||||
},
|
||||
Direction: orderDirection,
|
||||
Direction: qbtypes.OrderDirectionMap[direction],
|
||||
},
|
||||
}
|
||||
|
||||
// If we are ordering by the timestamp column, also order by the ID column
|
||||
if orderByKey.Name == telemetrylogs.LogsV2TimestampColumn {
|
||||
orderBy = append(orderBy, qbtypes.OrderBy{
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: telemetrylogs.LogsV2IDColumn,
|
||||
},
|
||||
},
|
||||
Direction: orderDirection,
|
||||
})
|
||||
}
|
||||
return orderBy, nil
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -2,261 +2,162 @@ package implrawdataexport
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strconv"
|
||||
"testing"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/telemetrylogs"
|
||||
"github.com/SigNoz/signoz/pkg/http/binding"
|
||||
"github.com/SigNoz/signoz/pkg/types"
|
||||
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetExportQuerySource(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
queryParams url.Values
|
||||
expectedSource string
|
||||
expectedError bool
|
||||
}{
|
||||
{
|
||||
name: "default logs source",
|
||||
queryParams: url.Values{},
|
||||
expectedSource: "logs",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "explicit logs source",
|
||||
queryParams: url.Values{"source": {"logs"}},
|
||||
expectedSource: "logs",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "metrics source - not supported",
|
||||
queryParams: url.Values{"source": {"metrics"}},
|
||||
expectedSource: "metrics",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "traces source - not supported",
|
||||
queryParams: url.Values{"source": {"traces"}},
|
||||
expectedSource: "traces",
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid source",
|
||||
queryParams: url.Values{"source": {"invalid"}},
|
||||
expectedSource: "",
|
||||
expectedError: true,
|
||||
},
|
||||
}
|
||||
func TestExportRawDataQueryParams_BindingDefaults(t *testing.T) {
|
||||
var params types.ExportRawDataQueryParams
|
||||
err := binding.Query.BindQuery(url.Values{}, ¶ms)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "logs", params.Source)
|
||||
assert.Equal(t, "csv", params.Format)
|
||||
assert.Equal(t, DefaultExportRowCountLimit, params.Limit)
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
source, err := getExportQuerySource(tt.queryParams)
|
||||
assert.Equal(t, tt.expectedSource, source)
|
||||
if tt.expectedError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
|
||||
func logQuery(limit int) qbtypes.QueryEnvelope {
|
||||
return qbtypes.QueryEnvelope{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.LogAggregation]{Limit: limit},
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetExportQueryFormat(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
queryParams url.Values
|
||||
expectedFormat string
|
||||
expectedError bool
|
||||
}{
|
||||
{
|
||||
name: "default csv format",
|
||||
queryParams: url.Values{},
|
||||
expectedFormat: "csv",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "explicit csv format",
|
||||
queryParams: url.Values{"format": {"csv"}},
|
||||
expectedFormat: "csv",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "jsonl format",
|
||||
queryParams: url.Values{"format": {"jsonl"}},
|
||||
expectedFormat: "jsonl",
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid format",
|
||||
queryParams: url.Values{"format": {"xml"}},
|
||||
expectedFormat: "",
|
||||
expectedError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
format, err := getExportQueryFormat(tt.queryParams)
|
||||
assert.Equal(t, tt.expectedFormat, format)
|
||||
if tt.expectedError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
})
|
||||
func traceQuery(limit int) qbtypes.QueryEnvelope {
|
||||
return qbtypes.QueryEnvelope{
|
||||
Type: qbtypes.QueryTypeBuilder,
|
||||
Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{Limit: limit},
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetExportQueryLimit(t *testing.T) {
|
||||
func traceOperatorQuery(limit int) qbtypes.QueryEnvelope {
|
||||
return qbtypes.QueryEnvelope{
|
||||
Type: qbtypes.QueryTypeTraceOperator,
|
||||
Spec: qbtypes.QueryBuilderTraceOperator{Limit: limit},
|
||||
}
|
||||
}
|
||||
|
||||
func makeRequest(queries ...qbtypes.QueryEnvelope) qbtypes.QueryRangeRequest {
|
||||
return qbtypes.QueryRangeRequest{
|
||||
CompositeQuery: qbtypes.CompositeQuery{Queries: queries},
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateAndApplyExportLimits(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
queryParams url.Values
|
||||
expectedLimit int
|
||||
req qbtypes.QueryRangeRequest
|
||||
expectedError bool
|
||||
// assertions on each query after the call (indexed)
|
||||
checkQueries func(t *testing.T, queries []qbtypes.QueryEnvelope)
|
||||
}{
|
||||
{
|
||||
name: "default limit",
|
||||
queryParams: url.Values{},
|
||||
expectedLimit: DefaultExportRowCountLimit,
|
||||
expectedError: false,
|
||||
name: "single log query, zero limit gets default",
|
||||
req: makeRequest(logQuery(0)),
|
||||
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
|
||||
assert.Equal(t, DefaultExportRowCountLimit, q[0].GetLimit())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid limit",
|
||||
queryParams: url.Values{"limit": {"5000"}},
|
||||
expectedLimit: 5000,
|
||||
expectedError: false,
|
||||
name: "single log query, valid limit kept",
|
||||
req: makeRequest(logQuery(1000)),
|
||||
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
|
||||
assert.Equal(t, 1000, q[0].GetLimit())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "maximum limit",
|
||||
queryParams: url.Values{"limit": {strconv.Itoa(MaxExportRowCountLimit)}},
|
||||
expectedLimit: MaxExportRowCountLimit,
|
||||
expectedError: false,
|
||||
name: "single log query, max limit kept",
|
||||
req: makeRequest(logQuery(MaxExportRowCountLimit)),
|
||||
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
|
||||
assert.Equal(t, MaxExportRowCountLimit, q[0].GetLimit())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "limit exceeds maximum",
|
||||
queryParams: url.Values{"limit": {"100000"}},
|
||||
expectedLimit: 0,
|
||||
name: "single log query, limit exceeds max",
|
||||
req: makeRequest(logQuery(MaxExportRowCountLimit + 1)),
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid limit format",
|
||||
queryParams: url.Values{"limit": {"invalid"}},
|
||||
expectedLimit: 0,
|
||||
name: "single log query, negative limit",
|
||||
req: makeRequest(logQuery(-1)),
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "negative limit",
|
||||
queryParams: url.Values{"limit": {"-100"}},
|
||||
expectedLimit: 0,
|
||||
name: "single trace query, zero limit gets default",
|
||||
req: makeRequest(traceQuery(0)),
|
||||
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
|
||||
assert.Equal(t, DefaultExportRowCountLimit, q[0].GetLimit())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple queries without trace operator",
|
||||
req: makeRequest(logQuery(0), traceQuery(0)),
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "trace operator with other queries — non-operator disabled",
|
||||
req: makeRequest(logQuery(500), traceOperatorQuery(1000)),
|
||||
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
|
||||
assert.True(t, q[0].IsDisabled(), "log query should be disabled")
|
||||
assert.False(t, q[1].IsDisabled(), "trace operator should stay enabled")
|
||||
assert.Equal(t, 1000, q[1].GetLimit())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "trace operator alone, zero limit gets default",
|
||||
req: makeRequest(traceOperatorQuery(0)),
|
||||
checkQueries: func(t *testing.T, q []qbtypes.QueryEnvelope) {
|
||||
assert.Equal(t, DefaultExportRowCountLimit, q[0].GetLimit())
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unsupported query type",
|
||||
req: makeRequest(qbtypes.QueryEnvelope{Type: qbtypes.QueryTypeBuilder, Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{}}),
|
||||
expectedError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
limit, err := getExportQueryLimit(tt.queryParams)
|
||||
assert.Equal(t, tt.expectedLimit, limit)
|
||||
err := validateAndApplyExportLimits(&tt.req)
|
||||
if tt.expectedError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
if tt.checkQueries != nil {
|
||||
tt.checkQueries(t, tt.req.CompositeQuery.Queries)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetExportQueryTimeRange(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
queryParams url.Values
|
||||
expectedStartTime uint64
|
||||
expectedEndTime uint64
|
||||
expectedError bool
|
||||
}{
|
||||
{
|
||||
name: "valid time range",
|
||||
queryParams: url.Values{
|
||||
"start": {"1640995200"},
|
||||
"end": {"1641081600"},
|
||||
},
|
||||
expectedStartTime: 1640995200,
|
||||
expectedEndTime: 1641081600,
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "missing start time",
|
||||
queryParams: url.Values{"end": {"1641081600"}},
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "missing end time",
|
||||
queryParams: url.Values{"start": {"1640995200"}},
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "missing both times",
|
||||
queryParams: url.Values{},
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid start time format",
|
||||
queryParams: url.Values{
|
||||
"start": {"invalid"},
|
||||
"end": {"1641081600"},
|
||||
},
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "invalid end time format",
|
||||
queryParams: url.Values{
|
||||
"start": {"1640995200"},
|
||||
"end": {"invalid"},
|
||||
},
|
||||
expectedError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
startTime, endTime, err := getExportQueryTimeRange(tt.queryParams)
|
||||
if tt.expectedError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, tt.expectedStartTime, startTime)
|
||||
assert.Equal(t, tt.expectedEndTime, endTime)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetExportQueryColumns(t *testing.T) {
|
||||
func TestParseExportQueryColumns(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
queryParams url.Values
|
||||
input []string
|
||||
expectedColumns []telemetrytypes.TelemetryFieldKey
|
||||
}{
|
||||
{
|
||||
name: "no columns specified",
|
||||
queryParams: url.Values{},
|
||||
name: "empty input",
|
||||
input: []string{},
|
||||
expectedColumns: []telemetrytypes.TelemetryFieldKey{},
|
||||
},
|
||||
{
|
||||
name: "single column",
|
||||
queryParams: url.Values{
|
||||
"columns": {"timestamp"},
|
||||
},
|
||||
name: "single column",
|
||||
input: []string{"timestamp"},
|
||||
expectedColumns: []telemetrytypes.TelemetryFieldKey{
|
||||
{Name: "timestamp"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multiple columns",
|
||||
queryParams: url.Values{
|
||||
"columns": {"timestamp", "message", "level"},
|
||||
},
|
||||
name: "multiple columns",
|
||||
input: []string{"timestamp", "message", "level"},
|
||||
expectedColumns: []telemetrytypes.TelemetryFieldKey{
|
||||
{Name: "timestamp"},
|
||||
{Name: "message"},
|
||||
@@ -264,211 +165,90 @@ func TestGetExportQueryColumns(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "empty column name (should be skipped)",
|
||||
queryParams: url.Values{
|
||||
"columns": {"timestamp", "", "level"},
|
||||
},
|
||||
name: "empty entry is skipped",
|
||||
input: []string{"timestamp", "", "level"},
|
||||
expectedColumns: []telemetrytypes.TelemetryFieldKey{
|
||||
{Name: "timestamp"},
|
||||
{Name: "level"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "whitespace column name (should be skipped)",
|
||||
queryParams: url.Values{
|
||||
"columns": {"timestamp", " ", "level"},
|
||||
},
|
||||
name: "whitespace-only entry is skipped",
|
||||
input: []string{"timestamp", " ", "level"},
|
||||
expectedColumns: []telemetrytypes.TelemetryFieldKey{
|
||||
{Name: "timestamp"},
|
||||
{Name: "level"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid column name with data type",
|
||||
queryParams: url.Values{
|
||||
"columns": {"timestamp", "attribute.user:string", "level"},
|
||||
},
|
||||
name: "column with context and type",
|
||||
input: []string{"attribute.user:string"},
|
||||
expectedColumns: []telemetrytypes.TelemetryFieldKey{
|
||||
{Name: "timestamp"},
|
||||
{Name: "user", FieldContext: telemetrytypes.FieldContextAttribute, FieldDataType: telemetrytypes.FieldDataTypeString},
|
||||
{Name: "level"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "valid column name with dot notation",
|
||||
queryParams: url.Values{
|
||||
"columns": {"timestamp", "attribute.user.string", "level"},
|
||||
},
|
||||
name: "column with context, dot-notation name",
|
||||
input: []string{"attribute.user.string"},
|
||||
expectedColumns: []telemetrytypes.TelemetryFieldKey{
|
||||
{Name: "timestamp"},
|
||||
{Name: "user.string", FieldContext: telemetrytypes.FieldContextAttribute},
|
||||
{Name: "level"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
columns := getExportQueryColumns(tt.queryParams)
|
||||
columns := parseExportQueryColumns(tt.input)
|
||||
assert.Equal(t, len(tt.expectedColumns), len(columns))
|
||||
for i, expectedCol := range tt.expectedColumns {
|
||||
assert.Equal(t, expectedCol, columns[i])
|
||||
for i, expected := range tt.expectedColumns {
|
||||
assert.Equal(t, expected, columns[i])
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetExportQueryOrderBy(t *testing.T) {
|
||||
func TestParseExportQueryOrderBy(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
queryParams url.Values
|
||||
input string
|
||||
expectedOrder []qbtypes.OrderBy
|
||||
expectedError bool
|
||||
}{
|
||||
{
|
||||
name: "no order specified",
|
||||
queryParams: url.Values{},
|
||||
name: "empty string returns empty slice",
|
||||
input: "",
|
||||
expectedOrder: []qbtypes.OrderBy{},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "simple column asc",
|
||||
input: "timestamp:asc",
|
||||
expectedOrder: []qbtypes.OrderBy{
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "timestamp"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "simple column desc",
|
||||
input: "timestamp:desc",
|
||||
expectedOrder: []qbtypes.OrderBy{
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: telemetrylogs.LogsV2TimestampColumn,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: telemetrylogs.LogsV2IDColumn,
|
||||
},
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: "timestamp"},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "single order error, direction not specified",
|
||||
queryParams: url.Values{
|
||||
"order_by": {"timestamp"},
|
||||
},
|
||||
expectedOrder: nil,
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "single order no error",
|
||||
queryParams: url.Values{
|
||||
"order_by": {"timestamp:asc"},
|
||||
},
|
||||
expectedOrder: []qbtypes.OrderBy{
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: telemetrylogs.LogsV2TimestampColumn,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: telemetrylogs.LogsV2IDColumn,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "multiple orders",
|
||||
queryParams: url.Values{
|
||||
"order_by": {"timestamp:asc", "body:desc", "id:asc"},
|
||||
},
|
||||
expectedOrder: []qbtypes.OrderBy{
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: telemetrylogs.LogsV2TimestampColumn,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: telemetrylogs.LogsV2IDColumn,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "empty order name (should be skipped)",
|
||||
queryParams: url.Values{
|
||||
"order_by": {"timestamp:asc", "", "id:asc"},
|
||||
},
|
||||
expectedOrder: []qbtypes.OrderBy{
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: telemetrylogs.LogsV2TimestampColumn,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: telemetrylogs.LogsV2IDColumn,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "whitespace order name (should be skipped)",
|
||||
queryParams: url.Values{
|
||||
"order_by": {"timestamp:asc", " ", "id:asc"},
|
||||
},
|
||||
expectedOrder: []qbtypes.OrderBy{
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: telemetrylogs.LogsV2TimestampColumn,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: telemetrylogs.LogsV2IDColumn,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "invalid order name (should error out)",
|
||||
queryParams: url.Values{
|
||||
"order_by": {"attributes.user:", "id:asc"},
|
||||
},
|
||||
expectedOrder: nil,
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
name: "valid order name (should be included)",
|
||||
queryParams: url.Values{
|
||||
"order_by": {"attribute.user:string:desc", "id:asc"},
|
||||
},
|
||||
name: "column with context and type qualifier",
|
||||
input: "attribute.user:string:desc",
|
||||
expectedOrder: []qbtypes.OrderBy{
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
@@ -484,10 +264,8 @@ func TestGetExportQueryOrderBy(t *testing.T) {
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "valid order name (should be included)",
|
||||
queryParams: url.Values{
|
||||
"order_by": {"attribute.user.string:desc", "id:asc"},
|
||||
},
|
||||
name: "column with context, dot-notation name",
|
||||
input: "attribute.user.string:desc",
|
||||
expectedOrder: []qbtypes.OrderBy{
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionDesc,
|
||||
@@ -501,18 +279,35 @@ func TestGetExportQueryOrderBy(t *testing.T) {
|
||||
},
|
||||
expectedError: false,
|
||||
},
|
||||
{
|
||||
name: "resource with context and type",
|
||||
input: "resource.service.name:string:asc",
|
||||
expectedOrder: []qbtypes.OrderBy{
|
||||
{
|
||||
Direction: qbtypes.OrderDirectionAsc,
|
||||
Key: qbtypes.OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
FieldContext: telemetrytypes.FieldContextResource,
|
||||
FieldDataType: telemetrytypes.FieldDataTypeString,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
order, err := getExportQueryOrderBy(tt.queryParams)
|
||||
order, err := parseExportQueryOrderBy(tt.input)
|
||||
if tt.expectedError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(tt.expectedOrder), len(order))
|
||||
for i, expectedOrd := range tt.expectedOrder {
|
||||
assert.Equal(t, expectedOrd, order[i])
|
||||
for i, expected := range tt.expectedOrder {
|
||||
assert.Equal(t, expected, order[i])
|
||||
}
|
||||
}
|
||||
})
|
||||
@@ -529,10 +324,8 @@ func TestConstructCSVHeaderFromQueryResponse(t *testing.T) {
|
||||
|
||||
header := constructCSVHeaderFromQueryResponse(data)
|
||||
|
||||
// Since map iteration order is not guaranteed, check that all expected keys are present
|
||||
expectedKeys := []string{"timestamp", "message", "level", "id"}
|
||||
assert.Equal(t, len(expectedKeys), len(header))
|
||||
|
||||
for _, key := range expectedKeys {
|
||||
assert.Contains(t, header, key)
|
||||
}
|
||||
|
||||
@@ -23,8 +23,26 @@ func NewModule(querier querier.Querier) rawdataexport.Module {
|
||||
|
||||
func (m *Module) ExportRawData(ctx context.Context, orgID valuer.UUID, rangeRequest *qbtypes.QueryRangeRequest, doneChan chan any) (chan *qbtypes.RawRow, chan error) {
|
||||
|
||||
spec := rangeRequest.CompositeQuery.Queries[0].Spec.(qbtypes.QueryBuilderQuery[qbtypes.LogAggregation])
|
||||
rowCountLimit := spec.Limit
|
||||
isTraceOperatorQueryPresent := false
|
||||
traceOperatorQueryIndex := -1
|
||||
|
||||
queries := rangeRequest.CompositeQuery.Queries
|
||||
for idx := range len(queries) {
|
||||
if _, ok := queries[idx].Spec.(qbtypes.QueryBuilderTraceOperator); ok {
|
||||
isTraceOperatorQueryPresent = true
|
||||
traceOperatorQueryIndex = idx
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If the trace operator query is present, mark the queries other than trace operator as disabled
|
||||
if isTraceOperatorQueryPresent {
|
||||
for idx := range len(queries) {
|
||||
if idx != traceOperatorQueryIndex {
|
||||
queries[idx].SetDisabled(true)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rowChan := make(chan *qbtypes.RawRow, 1)
|
||||
errChan := make(chan error, 1)
|
||||
@@ -38,52 +56,62 @@ func (m *Module) ExportRawData(ctx context.Context, orgID valuer.UUID, rangeRequ
|
||||
defer close(errChan)
|
||||
defer close(rowChan)
|
||||
|
||||
rowCount := 0
|
||||
|
||||
for rowCount < rowCountLimit {
|
||||
spec.Limit = min(ChunkSize, rowCountLimit-rowCount)
|
||||
spec.Offset = rowCount
|
||||
|
||||
rangeRequest.CompositeQuery.Queries[0].Spec = spec
|
||||
|
||||
response, err := m.querier.QueryRange(contextWithTimeout, orgID, rangeRequest)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
newRowsCount := 0
|
||||
for _, result := range response.Data.Results {
|
||||
resultData, ok := result.(*qbtypes.RawData)
|
||||
if !ok {
|
||||
errChan <- errors.NewInternalf(errors.CodeInternal, "expected RawData, got %T", result)
|
||||
return
|
||||
}
|
||||
|
||||
newRowsCount += len(resultData.Rows)
|
||||
for _, row := range resultData.Rows {
|
||||
select {
|
||||
case rowChan <- row:
|
||||
case <-doneChan:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
errChan <- ctx.Err()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Break if we did not receive any new rows
|
||||
if newRowsCount == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
rowCount += newRowsCount
|
||||
|
||||
if isTraceOperatorQueryPresent {
|
||||
// If the trace operator query is present, we need to export the data for the trace operator query only
|
||||
exportRawDataForSingleQuery(m.querier, contextWithTimeout, orgID, rangeRequest, rowChan, errChan, doneChan, traceOperatorQueryIndex)
|
||||
} else {
|
||||
// If the trace operator query is not present, we need to export the data for the first query only
|
||||
exportRawDataForSingleQuery(m.querier, contextWithTimeout, orgID, rangeRequest, rowChan, errChan, doneChan, 0)
|
||||
}
|
||||
}()
|
||||
|
||||
return rowChan, errChan
|
||||
|
||||
}
|
||||
|
||||
func exportRawDataForSingleQuery(querier querier.Querier, ctx context.Context, orgID valuer.UUID, rangeRequest *qbtypes.QueryRangeRequest, rowChan chan *qbtypes.RawRow, errChan chan error, doneChan chan any, queryIndex int) {
|
||||
|
||||
query := rangeRequest.CompositeQuery.Queries[queryIndex]
|
||||
rowCountLimit := query.GetLimit()
|
||||
rowCount := 0
|
||||
|
||||
for rowCount < rowCountLimit {
|
||||
chunkSize := min(ChunkSize, rowCountLimit-rowCount)
|
||||
query.SetLimit(chunkSize)
|
||||
query.SetOffset(rowCount)
|
||||
|
||||
response, err := querier.QueryRange(ctx, orgID, rangeRequest)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
newRowsCount := 0
|
||||
for _, result := range response.Data.Results {
|
||||
resultData, ok := result.(*qbtypes.RawData)
|
||||
if !ok {
|
||||
errChan <- errors.NewInternalf(errors.CodeInternal, "expected RawData, got %T", result)
|
||||
return
|
||||
}
|
||||
|
||||
newRowsCount += len(resultData.Rows)
|
||||
for _, row := range resultData.Rows {
|
||||
select {
|
||||
case rowChan <- row:
|
||||
case <-doneChan:
|
||||
return
|
||||
case <-ctx.Done():
|
||||
errChan <- ctx.Err()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rowCount += newRowsCount
|
||||
|
||||
// Stop if we received fewer rows than requested — no more data available
|
||||
if newRowsCount < chunkSize {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
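For orientation, here is a rough caller-side sketch of the channel contract above. It is a sketch only: the handler variables (module, orgID, rangeRequest, w) and the writeRow encoder are assumptions, not part of this change. The exporter closes both channels when it finishes, and closing doneChan asks it to stop early.

doneChan := make(chan any)
defer close(doneChan) // tells the exporter goroutine to stop if we return early

rowChan, errChan := module.ExportRawData(ctx, orgID, rangeRequest, doneChan)
for {
	select {
	case row, ok := <-rowChan:
		if !ok {
			return nil // exporter finished and closed its channels
		}
		if err := writeRow(w, row); err != nil { // writeRow: hypothetical CSV/JSONL encoder
			return err
		}
	case err, ok := <-errChan:
		if ok && err != nil {
			return err
		}
	case <-ctx.Done():
		return ctx.Err()
	}
}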
@@ -585,9 +585,6 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
|
||||
aH.LicensingAPI.Activate(rw, req)
|
||||
})).Methods(http.MethodGet)
|
||||
|
||||
// Export
|
||||
router.HandleFunc("/api/v1/export_raw_data", am.ViewAccess(aH.Signoz.Handlers.RawDataExport.ExportRawData)).Methods(http.MethodGet)
|
||||
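// Illustration only (not part of this change): a request to the route above, with
// host and token as placeholders; the route is guarded by the ViewAccess middleware:
//   GET /api/v1/export_raw_data?source=logs&start=<start_ns>&end=<end_ns>
//   Authorization: Bearer <token>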
|
||||
router.HandleFunc("/api/v1/span_percentile", am.ViewAccess(aH.Signoz.Handlers.SpanPercentile.GetSpanPercentileDetails)).Methods(http.MethodPost)
|
||||
|
||||
// Query Filter Analyzer api used to extract metric names and grouping columns from a query
|
||||
|
||||
@@ -23,6 +23,7 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/modules/organization"
|
||||
"github.com/SigNoz/signoz/pkg/modules/preference"
|
||||
"github.com/SigNoz/signoz/pkg/modules/promote"
|
||||
"github.com/SigNoz/signoz/pkg/modules/rawdataexport"
|
||||
"github.com/SigNoz/signoz/pkg/modules/session"
|
||||
"github.com/SigNoz/signoz/pkg/modules/user"
|
||||
"github.com/SigNoz/signoz/pkg/querier"
|
||||
@@ -58,6 +59,7 @@ func NewOpenAPI(ctx context.Context, instrumentation instrumentation.Instrumenta
|
||||
struct{ gateway.Handler }{},
|
||||
struct{ fields.Handler }{},
|
||||
struct{ authz.Handler }{},
|
||||
struct{ rawdataexport.Handler }{},
|
||||
).New(ctx, instrumentation.ToProviderSettings(), apiserver.Config{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -252,6 +252,7 @@ func NewAPIServerProviderFactories(orgGetter organization.Getter, authz authz.Au
|
||||
handlers.GatewayHandler,
|
||||
handlers.Fields,
|
||||
handlers.AuthzHandler,
|
||||
handlers.RawDataExport,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
pkg/types/querybuildertypes/querybuildertypesv5/req_getters.go (new file, 386 lines)
@@ -0,0 +1,386 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
|
||||
// GetExpression returns the expression string. Panics for Join, PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetExpression() string {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Expression
|
||||
case QueryBuilderFormula:
|
||||
return spec.Expression
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetReturnSpansFrom returns the return-spans-from value. Panics for all types except TraceOperator.
|
||||
func (q *QueryEnvelope) GetReturnSpansFrom() string {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.ReturnSpansFrom
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetSignal returns the signal. Panics for all types except Query[T].
|
||||
func (q *QueryEnvelope) GetSignal() telemetrytypes.Signal {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Signal
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Signal
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Signal
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetSource returns the source. Panics for all types except Query[T].
|
||||
func (q *QueryEnvelope) GetSource() telemetrytypes.Source {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Source
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Source
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Source
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetQuery returns the raw query string. Panics for all types except PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetQuery() string {
|
||||
switch spec := q.Spec.(type) {
|
||||
case PromQuery:
|
||||
return spec.Query
|
||||
case ClickHouseQuery:
|
||||
return spec.Query
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetStep returns the PromQL step size. Panics for all types except PromQuery.
|
||||
func (q *QueryEnvelope) GetStep() Step {
|
||||
switch spec := q.Spec.(type) {
|
||||
case PromQuery:
|
||||
return spec.Step
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetStats returns the PromQL stats flag. Panics for all types except PromQuery.
|
||||
func (q *QueryEnvelope) GetStats() bool {
|
||||
switch spec := q.Spec.(type) {
|
||||
case PromQuery:
|
||||
return spec.Stats
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetLeft returns the left query reference of a join. Panics for all types except QueryBuilderJoin.
|
||||
func (q *QueryEnvelope) GetLeft() QueryRef {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderJoin:
|
||||
return spec.Left
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetRight returns the right query reference of a join. Panics for all types except QueryBuilderJoin.
|
||||
func (q *QueryEnvelope) GetRight() QueryRef {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderJoin:
|
||||
return spec.Right
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetJoinType returns the join type. Panics for all types except QueryBuilderJoin.
|
||||
func (q *QueryEnvelope) GetJoinType() JoinType {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderJoin:
|
||||
return spec.Type
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetOn returns the join ON condition. Panics for all types except QueryBuilderJoin.
|
||||
func (q *QueryEnvelope) GetOn() string {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderJoin:
|
||||
return spec.On
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetQueryName returns the name of the spec.
|
||||
func (q *QueryEnvelope) GetQueryName() string {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Name
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Name
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Name
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Name
|
||||
case QueryBuilderFormula:
|
||||
return spec.Name
|
||||
case QueryBuilderJoin:
|
||||
return spec.Name
|
||||
case PromQuery:
|
||||
return spec.Name
|
||||
case ClickHouseQuery:
|
||||
return spec.Name
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// IsDisabled returns whether the spec is disabled.
|
||||
func (q *QueryEnvelope) IsDisabled() bool {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Disabled
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Disabled
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Disabled
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Disabled
|
||||
case QueryBuilderFormula:
|
||||
return spec.Disabled
|
||||
case QueryBuilderJoin:
|
||||
return spec.Disabled
|
||||
case PromQuery:
|
||||
return spec.Disabled
|
||||
case ClickHouseQuery:
|
||||
return spec.Disabled
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetLimit returns the row limit. Panics for PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetLimit() int {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Limit
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Limit
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Limit
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Limit
|
||||
case QueryBuilderFormula:
|
||||
return spec.Limit
|
||||
case QueryBuilderJoin:
|
||||
return spec.Limit
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetOffset returns the row offset. Panics for Formula, Join, PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetOffset() int {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Offset
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Offset
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Offset
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Offset
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetType returns the QueryType of the envelope.
|
||||
func (q *QueryEnvelope) GetType() QueryType {
|
||||
return q.Type
|
||||
}
|
||||
|
||||
// GetOrder returns the order-by clauses. Panics for PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetOrder() []OrderBy {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Order
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Order
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Order
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Order
|
||||
case QueryBuilderFormula:
|
||||
return spec.Order
|
||||
case QueryBuilderJoin:
|
||||
return spec.Order
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetGroupBy returns the group-by keys. Panics for Formula, PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetGroupBy() []GroupByKey {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.GroupBy
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.GroupBy
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.GroupBy
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.GroupBy
|
||||
case QueryBuilderJoin:
|
||||
return spec.GroupBy
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetFilter returns the filter. Panics for Formula, PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetFilter() *Filter {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Filter
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Filter
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Filter
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Filter
|
||||
case QueryBuilderJoin:
|
||||
return spec.Filter
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetHaving returns the having clause. Panics for PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetHaving() *Having {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Having
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Having
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Having
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Having
|
||||
case QueryBuilderFormula:
|
||||
return spec.Having
|
||||
case QueryBuilderJoin:
|
||||
return spec.Having
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetFunctions returns the post-processing functions. Panics for PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetFunctions() []Function {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Functions
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Functions
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Functions
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Functions
|
||||
case QueryBuilderFormula:
|
||||
return spec.Functions
|
||||
case QueryBuilderJoin:
|
||||
return spec.Functions
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetSelectFields returns the selected fields. Panics for Formula, PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetSelectFields() []telemetrytypes.TelemetryFieldKey {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.SelectFields
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.SelectFields
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.SelectFields
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.SelectFields
|
||||
case QueryBuilderJoin:
|
||||
return spec.SelectFields
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetLegend returns the legend label. Panics for QueryBuilderJoin.
|
||||
func (q *QueryEnvelope) GetLegend() string {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Legend
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Legend
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Legend
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Legend
|
||||
case QueryBuilderFormula:
|
||||
return spec.Legend
|
||||
case PromQuery:
|
||||
return spec.Legend
|
||||
case ClickHouseQuery:
|
||||
return spec.Legend
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetCursor returns the pagination cursor. Panics for Formula, Join, PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetCursor() string {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Cursor
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Cursor
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Cursor
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Cursor
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetStepInterval returns the step interval. Panics for Formula, Join, PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetStepInterval() Step {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.StepInterval
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.StepInterval
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.StepInterval
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.StepInterval
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetSecondaryAggregations returns the secondary aggregations. Panics for TraceOperator, Formula, PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetSecondaryAggregations() []SecondaryAggregation {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.SecondaryAggregations
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.SecondaryAggregations
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.SecondaryAggregations
|
||||
case QueryBuilderJoin:
|
||||
return spec.SecondaryAggregations
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
|
||||
// GetLimitBy returns the limit-by configuration. Panics for TraceOperator, Formula, Join, PromQuery and ClickHouseQuery.
|
||||
func (q *QueryEnvelope) GetLimitBy() *LimitBy {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.LimitBy
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.LimitBy
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.LimitBy
|
||||
}
|
||||
panic("unsupported spec type")
|
||||
}
|
||||
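Since these getters panic for spec types that do not carry the requested field, call sites that iterate over mixed query types may want to type-check Spec first. A minimal sketch under that assumption (rangeRequest is whatever request is already in scope):

for _, envelope := range rangeRequest.CompositeQuery.Queries {
	if _, ok := envelope.Spec.(PromQuery); !ok {
		continue // GetStep is only defined for PromQuery and would panic otherwise
	}
	step := envelope.GetStep()
	_ = step
}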
pkg/types/querybuildertypes/querybuildertypesv5/req_setters.go (new file, 458 lines)
@@ -0,0 +1,458 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import "github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
|
||||
// SetExpression sets the expression string of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetExpression(expression string) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.Expression = expression
|
||||
q.Spec = spec
|
||||
case QueryBuilderFormula:
|
||||
spec.Expression = expression
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
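Note on the pattern used by every setter in this file: the type switch binds spec to a copy of the value stored in the Spec interface, so the mutation must be written back with q.Spec = spec. A minimal sketch with hypothetical values:

var q QueryEnvelope
q.Spec = QueryBuilderFormula{Name: "F1", Expression: "A + B"}
q.SetExpression("A - B") // internally: spec.Expression = "A - B"; q.Spec = spec
_ = q.GetExpression()    // now "A - B"; without the write-back it would still be "A + B"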
|
||||
// SetReturnSpansFrom sets the return-spans-from value, if applicable.
|
||||
func (q *QueryEnvelope) SetReturnSpansFrom(returnSpansFrom string) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.ReturnSpansFrom = returnSpansFrom
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetSignal sets the signal of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetSignal(signal telemetrytypes.Signal) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Signal = signal
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Signal = signal
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Signal = signal
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetSource sets the source of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetSource(source telemetrytypes.Source) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Source = source
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Source = source
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Source = source
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetQuery sets the raw query string of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetQuery(query string) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case PromQuery:
|
||||
spec.Query = query
|
||||
q.Spec = spec
|
||||
case ClickHouseQuery:
|
||||
spec.Query = query
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetStep sets the PromQL step size, if applicable.
|
||||
func (q *QueryEnvelope) SetStep(step Step) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case PromQuery:
|
||||
spec.Step = step
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetStats sets the PromQL stats flag, if applicable.
|
||||
func (q *QueryEnvelope) SetStats(stats bool) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case PromQuery:
|
||||
spec.Stats = stats
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetLeft sets the left query reference of a join, if applicable.
|
||||
func (q *QueryEnvelope) SetLeft(left QueryRef) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderJoin:
|
||||
spec.Left = left
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetRight sets the right query reference of a join, if applicable.
|
||||
func (q *QueryEnvelope) SetRight(right QueryRef) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderJoin:
|
||||
spec.Right = right
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetJoinType sets the join type, if applicable.
|
||||
func (q *QueryEnvelope) SetJoinType(joinType JoinType) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderJoin:
|
||||
spec.Type = joinType
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetOn sets the join ON condition, if applicable.
|
||||
func (q *QueryEnvelope) SetOn(on string) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderJoin:
|
||||
spec.On = on
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetQueryName sets the name of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetQueryName(name string) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.Name = name
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Name = name
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Name = name
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Name = name
|
||||
q.Spec = spec
|
||||
case QueryBuilderFormula:
|
||||
spec.Name = name
|
||||
q.Spec = spec
|
||||
case QueryBuilderJoin:
|
||||
spec.Name = name
|
||||
q.Spec = spec
|
||||
case PromQuery:
|
||||
spec.Name = name
|
||||
q.Spec = spec
|
||||
case ClickHouseQuery:
|
||||
spec.Name = name
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetDisabled sets the disabled flag of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetDisabled(disabled bool) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.Disabled = disabled
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Disabled = disabled
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Disabled = disabled
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Disabled = disabled
|
||||
q.Spec = spec
|
||||
case QueryBuilderFormula:
|
||||
spec.Disabled = disabled
|
||||
q.Spec = spec
|
||||
case QueryBuilderJoin:
|
||||
spec.Disabled = disabled
|
||||
q.Spec = spec
|
||||
case PromQuery:
|
||||
spec.Disabled = disabled
|
||||
q.Spec = spec
|
||||
case ClickHouseQuery:
|
||||
spec.Disabled = disabled
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetLimit sets the row limit of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetLimit(limit int) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.Limit = limit
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Limit = limit
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Limit = limit
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Limit = limit
|
||||
q.Spec = spec
|
||||
case QueryBuilderFormula:
|
||||
spec.Limit = limit
|
||||
q.Spec = spec
|
||||
case QueryBuilderJoin:
|
||||
spec.Limit = limit
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetOffset sets the row offset of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetOffset(offset int) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.Offset = offset
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Offset = offset
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Offset = offset
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Offset = offset
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetType sets the QueryType of the envelope.
|
||||
func (q *QueryEnvelope) SetType(t QueryType) {
|
||||
q.Type = t
|
||||
}
|
||||
|
||||
// SetOrder sets the order-by clauses of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetOrder(order []OrderBy) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.Order = order
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Order = order
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Order = order
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Order = order
|
||||
q.Spec = spec
|
||||
case QueryBuilderFormula:
|
||||
spec.Order = order
|
||||
q.Spec = spec
|
||||
case QueryBuilderJoin:
|
||||
spec.Order = order
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetGroupBy sets the group-by keys of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetGroupBy(groupBy []GroupByKey) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.GroupBy = groupBy
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.GroupBy = groupBy
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.GroupBy = groupBy
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.GroupBy = groupBy
|
||||
q.Spec = spec
|
||||
case QueryBuilderJoin:
|
||||
spec.GroupBy = groupBy
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetFilter sets the filter of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetFilter(filter *Filter) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.Filter = filter
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Filter = filter
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Filter = filter
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Filter = filter
|
||||
q.Spec = spec
|
||||
case QueryBuilderJoin:
|
||||
spec.Filter = filter
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetHaving sets the having clause of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetHaving(having *Having) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.Having = having
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Having = having
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Having = having
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Having = having
|
||||
q.Spec = spec
|
||||
case QueryBuilderFormula:
|
||||
spec.Having = having
|
||||
q.Spec = spec
|
||||
case QueryBuilderJoin:
|
||||
spec.Having = having
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetFunctions sets the post-processing functions of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetFunctions(functions []Function) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.Functions = functions
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Functions = functions
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Functions = functions
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Functions = functions
|
||||
q.Spec = spec
|
||||
case QueryBuilderFormula:
|
||||
spec.Functions = functions
|
||||
q.Spec = spec
|
||||
case QueryBuilderJoin:
|
||||
spec.Functions = functions
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetSelectFields sets the selected fields of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetSelectFields(fields []telemetrytypes.TelemetryFieldKey) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.SelectFields = fields
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.SelectFields = fields
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.SelectFields = fields
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.SelectFields = fields
|
||||
q.Spec = spec
|
||||
case QueryBuilderJoin:
|
||||
spec.SelectFields = fields
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetLegend sets the legend label of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetLegend(legend string) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.Legend = legend
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Legend = legend
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Legend = legend
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Legend = legend
|
||||
q.Spec = spec
|
||||
case QueryBuilderFormula:
|
||||
spec.Legend = legend
|
||||
q.Spec = spec
|
||||
case PromQuery:
|
||||
spec.Legend = legend
|
||||
q.Spec = spec
|
||||
case ClickHouseQuery:
|
||||
spec.Legend = legend
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetCursor sets the pagination cursor of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetCursor(cursor string) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.Cursor = cursor
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.Cursor = cursor
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.Cursor = cursor
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.Cursor = cursor
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetStepInterval sets the step interval of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetStepInterval(step Step) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderTraceOperator:
|
||||
spec.StepInterval = step
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.StepInterval = step
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.StepInterval = step
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.StepInterval = step
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetSecondaryAggregations sets the secondary aggregations of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetSecondaryAggregations(secondaryAggregations []SecondaryAggregation) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.SecondaryAggregations = secondaryAggregations
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.SecondaryAggregations = secondaryAggregations
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.SecondaryAggregations = secondaryAggregations
|
||||
q.Spec = spec
|
||||
case QueryBuilderJoin:
|
||||
spec.SecondaryAggregations = secondaryAggregations
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
|
||||
// SetLimitBy sets the limit-by configuration of the spec, if applicable.
|
||||
func (q *QueryEnvelope) SetLimitBy(limitBy *LimitBy) {
|
||||
switch spec := q.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
spec.LimitBy = limitBy
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
spec.LimitBy = limitBy
|
||||
q.Spec = spec
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
spec.LimitBy = limitBy
|
||||
q.Spec = spec
|
||||
}
|
||||
}
|
||||
@@ -10,55 +10,9 @@ import (
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
)
|
||||
|
||||
// queryName returns the name from any query envelope spec type.
|
||||
func (e QueryEnvelope) queryName() string {
|
||||
switch spec := e.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Name
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Name
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Name
|
||||
case QueryBuilderFormula:
|
||||
return spec.Name
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Name
|
||||
case QueryBuilderJoin:
|
||||
return spec.Name
|
||||
case PromQuery:
|
||||
return spec.Name
|
||||
case ClickHouseQuery:
|
||||
return spec.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// isDisabled returns the disabled status from any query envelope spec type.
|
||||
func (e QueryEnvelope) isDisabled() bool {
|
||||
switch spec := e.Spec.(type) {
|
||||
case QueryBuilderQuery[TraceAggregation]:
|
||||
return spec.Disabled
|
||||
case QueryBuilderQuery[LogAggregation]:
|
||||
return spec.Disabled
|
||||
case QueryBuilderQuery[MetricAggregation]:
|
||||
return spec.Disabled
|
||||
case QueryBuilderFormula:
|
||||
return spec.Disabled
|
||||
case QueryBuilderTraceOperator:
|
||||
return spec.Disabled
|
||||
case QueryBuilderJoin:
|
||||
return spec.Disabled
|
||||
case PromQuery:
|
||||
return spec.Disabled
|
||||
case ClickHouseQuery:
|
||||
return spec.Disabled
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getQueryIdentifier returns a friendly identifier for a query based on its type and name/content
|
||||
func getQueryIdentifier(envelope QueryEnvelope, index int) string {
|
||||
name := envelope.queryName()
|
||||
name := envelope.GetQueryName()
|
||||
|
||||
var typeLabel string
|
||||
switch envelope.Type {
|
||||
@@ -471,7 +425,7 @@ func (r *QueryRangeRequest) Validate() error {
|
||||
// validateAllQueriesNotDisabled validates that at least one query in the composite query is enabled
|
||||
func (r *QueryRangeRequest) validateAllQueriesNotDisabled() error {
|
||||
for _, envelope := range r.CompositeQuery.Queries {
|
||||
if !envelope.isDisabled() {
|
||||
if !envelope.IsDisabled() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
@@ -506,7 +460,7 @@ func (c *CompositeQuery) Validate(requestType RequestType) error {
|
||||
|
||||
// Check name uniqueness for builder queries
|
||||
if envelope.Type == QueryTypeBuilder || envelope.Type == QueryTypeSubQuery {
|
||||
name := envelope.queryName()
|
||||
name := envelope.GetQueryName()
|
||||
if name != "" {
|
||||
if queryNames[name] {
|
||||
return errors.NewInvalidInputf(
|
||||
|
||||
@@ -816,7 +816,7 @@ func TestQueryEnvelope_Helpers(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.envelope.queryName()
|
||||
got := tt.envelope.GetQueryName()
|
||||
if got != tt.want {
|
||||
t.Errorf("queryName() = %q, want %q", got, tt.want)
|
||||
}
|
||||
@@ -868,7 +868,7 @@ func TestQueryEnvelope_Helpers(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := tt.envelope.isDisabled()
|
||||
got := tt.envelope.IsDisabled()
|
||||
if got != tt.want {
|
||||
t.Errorf("isDisabled() = %v, want %v", got, tt.want)
|
||||
}
|
||||
|
||||
pkg/types/rawdataexport.go (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
package types
|
||||
|
||||
// ExportRawDataQueryParams represents the query parameters for the export raw data endpoint
|
||||
type ExportRawDataQueryParams struct {
|
||||
ExportRawDataFormatQueryParam
|
||||
|
||||
// Source specifies the type of data to export: "logs" or "traces"
|
||||
Source string `query:"source,default=logs" default:"logs" enum:"logs,traces"`
|
||||
|
||||
// Start is the start time for the query (Unix timestamp in nanoseconds)
|
||||
Start uint64 `query:"start"`
|
||||
|
||||
// End is the end time for the query (Unix timestamp in nanoseconds)
|
||||
End uint64 `query:"end"`
|
||||
|
||||
// Limit specifies the maximum number of rows to export
|
||||
Limit int `query:"limit,default=10000" default:"10000" minimum:"1" maximum:"50000"`
|
||||
|
||||
// Filter is a filter expression to apply to the query
|
||||
Filter string `query:"filter"`
|
||||
|
||||
// Columns specifies the columns to include in the export
|
||||
// Format: ["context.field:type", "context.field", "field"]
|
||||
Columns []string `query:"columns"`
|
||||
|
||||
// OrderBy specifies the sorting order
|
||||
// Format: "column:direction" or "context.field:type:direction"
|
||||
// Direction can be "asc" or "desc"
|
||||
OrderBy string `query:"order_by"`
|
||||
}
|
||||
|
||||
type ExportRawDataFormatQueryParam struct {
|
||||
// Format specifies the output format: "csv" or "jsonl"
|
||||
Format string `query:"format,default=csv" default:"csv" enum:"csv,jsonl"`
|
||||
}
|
||||
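For reference, a request that exercises these parameters (values mirror the integration tests below; start and end are placeholders):

GET /api/v1/export_raw_data?source=logs&format=jsonl&start=<start_ns>&end=<end_ns>&limit=3&filter=severity_text%20%3D%20%27ERROR%27&columns=timestamp&columns=severity_text&columns=body&order_by=timestamp:asc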
tests/integration/src/rawexportdata/01_logs.py (new file, 634 lines)
@@ -0,0 +1,634 @@
|
||||
import csv
|
||||
import io
|
||||
import json
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from http import HTTPStatus
|
||||
from typing import Callable, List
|
||||
from urllib.parse import urlencode
|
||||
|
||||
import requests
|
||||
|
||||
from fixtures import types
|
||||
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
|
||||
from fixtures.logs import Logs
|
||||
|
||||
|
||||
def test_export_logs_csv(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: None, # pylint: disable=unused-argument
|
||||
get_token: Callable[[str, str], str],
|
||||
insert_logs: Callable[[List[Logs]], None],
|
||||
) -> None:
|
||||
"""
|
||||
Setup:
|
||||
Insert 3 logs with different severity levels and attributes.
|
||||
|
||||
Tests:
|
||||
1. Export logs as CSV format
|
||||
2. Verify CSV structure and content
|
||||
3. Validate headers are present
|
||||
4. Check log data is correctly formatted
|
||||
"""
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
insert_logs(
|
||||
[
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=10),
|
||||
body="Application started successfully",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"service.name": "api-service",
|
||||
"deployment.environment": "production",
|
||||
"host.name": "server-01",
|
||||
},
|
||||
attributes={
|
||||
"http.method": "GET",
|
||||
"http.status_code": 200,
|
||||
"user.id": "user123",
|
||||
},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=8),
|
||||
body="Connection to database failed",
|
||||
severity_text="ERROR",
|
||||
resources={
|
||||
"service.name": "api-service",
|
||||
"deployment.environment": "production",
|
||||
"host.name": "server-01",
|
||||
},
|
||||
attributes={
|
||||
"error.type": "ConnectionError",
|
||||
"db.name": "production_db",
|
||||
},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=5),
|
||||
body="Request processed",
|
||||
severity_text="DEBUG",
|
||||
resources={
|
||||
"service.name": "worker-service",
|
||||
"deployment.environment": "production",
|
||||
"host.name": "server-02",
|
||||
},
|
||||
attributes={
|
||||
"request.id": "req-456",
|
||||
"duration_ms": 150.5,
|
||||
},
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
# Calculate timestamps in nanoseconds
|
||||
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
|
||||
end_ns = int(now.timestamp() * 1e9)
|
||||
|
||||
params = {
|
||||
"start": start_ns,
|
||||
"end": end_ns,
|
||||
}
|
||||
|
||||
# Export logs as CSV (default format, no source needed)
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
f"/api/v1/export_raw_data?{urlencode(params)}"
|
||||
),
|
||||
timeout=30,
|
||||
headers={
|
||||
"authorization": f"Bearer {token}",
|
||||
},
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
assert response.headers["Content-Type"] == "text/csv"
|
||||
assert "attachment" in response.headers.get("Content-Disposition", "")
|
||||
|
||||
# Parse CSV content
|
||||
csv_content = response.text
|
||||
csv_reader = csv.DictReader(io.StringIO(csv_content))
|
||||
|
||||
rows = list(csv_reader)
|
||||
assert len(rows) == 3, f"Expected 3 rows, got {len(rows)}"
|
||||
|
||||
# Verify log bodies are present in the exported data
|
||||
bodies = [row.get("body") for row in rows]
|
||||
assert "Application started successfully" in bodies
|
||||
assert "Connection to database failed" in bodies
|
||||
assert "Request processed" in bodies
|
||||
|
||||
# Verify severity levels
|
||||
severities = [row.get("severity_text") for row in rows]
|
||||
assert "INFO" in severities
|
||||
assert "ERROR" in severities
|
||||
assert "DEBUG" in severities
|
||||
|
||||
|
||||
def test_export_logs_jsonl(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: None, # pylint: disable=unused-argument
|
||||
get_token: Callable[[str, str], str],
|
||||
insert_logs: Callable[[List[Logs]], None],
|
||||
) -> None:
|
||||
"""
|
||||
Setup:
|
||||
Insert 2 logs with different attributes.
|
||||
|
||||
Tests:
|
||||
1. Export logs as JSONL format
|
||||
2. Verify JSONL structure and content
|
||||
3. Check each line is valid JSON
|
||||
4. Validate log data is correctly formatted
|
||||
"""
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
insert_logs(
|
||||
[
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=10),
|
||||
body="User logged in",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"service.name": "auth-service",
|
||||
"deployment.environment": "staging",
|
||||
},
|
||||
attributes={
|
||||
"user.email": "test@example.com",
|
||||
"session.id": "sess-789",
|
||||
},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=5),
|
||||
body="Payment processed successfully",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"service.name": "payment-service",
|
||||
"deployment.environment": "staging",
|
||||
},
|
||||
attributes={
|
||||
"transaction.id": "txn-123",
|
||||
"amount": 99.99,
|
||||
},
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
# Calculate timestamps in nanoseconds
|
||||
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
|
||||
end_ns = int(now.timestamp() * 1e9)
|
||||
|
||||
params = {
|
||||
"start": start_ns,
|
||||
"end": end_ns,
|
||||
"format": "jsonl",
|
||||
"source": "logs",
|
||||
}
|
||||
|
||||
# Export logs as JSONL
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
f"/api/v1/export_raw_data?{urlencode(params)}"
|
||||
),
|
||||
timeout=10,
|
||||
headers={
|
||||
"authorization": f"Bearer {token}",
|
||||
},
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
assert response.headers["Content-Type"] == "application/x-ndjson"
|
||||
assert "attachment" in response.headers.get("Content-Disposition", "")
|
||||
|
||||
# Parse JSONL content
|
||||
jsonl_lines = response.text.strip().split("\n")
|
||||
assert len(jsonl_lines) == 2, f"Expected 2 lines, got {len(jsonl_lines)}"
|
||||
|
||||
# Verify each line is valid JSON
|
||||
json_objects = []
|
||||
for line in jsonl_lines:
|
||||
obj = json.loads(line)
|
||||
json_objects.append(obj)
|
||||
assert "id" in obj
|
||||
assert "timestamp" in obj
|
||||
assert "body" in obj
|
||||
assert "severity_text" in obj
|
||||
|
||||
# Verify log bodies
|
||||
bodies = [obj.get("body") for obj in json_objects]
|
||||
assert "User logged in" in bodies
|
||||
assert "Payment processed successfully" in bodies
|
||||
|
||||
|
||||
def test_export_logs_with_filter(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: None, # pylint: disable=unused-argument
|
||||
get_token: Callable[[str, str], str],
|
||||
insert_logs: Callable[[List[Logs]], None],
|
||||
) -> None:
|
||||
"""
|
||||
Setup:
|
||||
Insert logs with different severity levels.
|
||||
|
||||
Tests:
|
||||
1. Export logs with filter applied
|
||||
2. Verify only filtered logs are returned
|
||||
"""
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
insert_logs(
|
||||
[
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=10),
|
||||
body="Info message",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"service.name": "test-service",
|
||||
},
|
||||
attributes={},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=8),
|
||||
body="Error message",
|
||||
severity_text="ERROR",
|
||||
resources={
|
||||
"service.name": "test-service",
|
||||
},
|
||||
attributes={},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=5),
|
||||
body="Another error message",
|
||||
severity_text="ERROR",
|
||||
resources={
|
||||
"service.name": "test-service",
|
||||
},
|
||||
attributes={},
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
# Calculate timestamps in nanoseconds
|
||||
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
|
||||
end_ns = int(now.timestamp() * 1e9)
|
||||
|
||||
params = {
|
||||
"start": start_ns,
|
||||
"end": end_ns,
|
||||
"format": "jsonl",
|
||||
"source": "logs",
|
||||
"filter": "severity_text = 'ERROR'",
|
||||
}
|
||||
|
||||
# Export logs with filter
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
f"/api/v1/export_raw_data?{urlencode(params)}"
|
||||
),
|
||||
timeout=10,
|
||||
headers={
|
||||
"authorization": f"Bearer {token}",
|
||||
},
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
assert response.headers["Content-Type"] == "application/x-ndjson"
|
||||
|
||||
# Parse JSONL content
|
||||
jsonl_lines = response.text.strip().split("\n")
|
||||
assert len(jsonl_lines) == 2, f"Expected 2 lines (filtered), got {len(jsonl_lines)}"
|
||||
|
||||
# Verify only ERROR logs are returned
|
||||
for line in jsonl_lines:
|
||||
obj = json.loads(line)
|
||||
assert obj["severity_text"] == "ERROR"
|
||||
assert "error message" in obj["body"].lower()
|
||||
|
||||
|
||||
def test_export_logs_with_limit(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: None, # pylint: disable=unused-argument
|
||||
get_token: Callable[[str, str], str],
|
||||
insert_logs: Callable[[List[Logs]], None],
|
||||
) -> None:
|
||||
"""
|
||||
Setup:
|
||||
Insert 5 logs.
|
||||
|
||||
Tests:
|
||||
1. Export logs with limit applied
|
||||
2. Verify only limited number of logs are returned
|
||||
"""
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
logs = []
|
||||
for i in range(5):
|
||||
logs.append(
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=i),
|
||||
body=f"Log message {i}",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"service.name": "test-service",
|
||||
},
|
||||
attributes={
|
||||
"index": i,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
insert_logs(logs)
|
||||
|
||||
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
# Calculate timestamps in nanoseconds
|
||||
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
|
||||
end_ns = int(now.timestamp() * 1e9)
|
||||
|
||||
params = {
|
||||
"start": start_ns,
|
||||
"end": end_ns,
|
||||
"format": "csv",
|
||||
"source": "logs",
|
||||
"limit": 3,
|
||||
}
|
||||
|
||||
# Export logs with limit
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
f"/api/v1/export_raw_data?{urlencode(params)}"
|
||||
),
|
||||
timeout=10,
|
||||
headers={
|
||||
"authorization": f"Bearer {token}",
|
||||
},
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
assert response.headers["Content-Type"] == "text/csv"
|
||||
|
||||
# Parse CSV content
|
||||
csv_content = response.text
|
||||
csv_reader = csv.DictReader(io.StringIO(csv_content))
|
||||
|
||||
rows = list(csv_reader)
|
||||
assert len(rows) == 3, f"Expected 3 rows (limited), got {len(rows)}"
|
||||
|
||||
|
||||
def test_export_logs_with_columns(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: None, # pylint: disable=unused-argument
|
||||
get_token: Callable[[str, str], str],
|
||||
insert_logs: Callable[[List[Logs]], None],
|
||||
) -> None:
|
||||
"""
|
||||
Setup:
|
||||
Insert logs with various attributes.
|
||||
|
||||
Tests:
|
||||
1. Export logs with specific columns
|
||||
2. Verify only specified columns are returned
|
||||
"""
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
insert_logs(
|
||||
[
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=10),
|
||||
body="Test log message",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"service.name": "test-service",
|
||||
"deployment.environment": "production",
|
||||
},
|
||||
attributes={
|
||||
"http.method": "GET",
|
||||
"http.status_code": 200,
|
||||
},
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
# Calculate timestamps in nanoseconds
|
||||
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
|
||||
end_ns = int(now.timestamp() * 1e9)
|
||||
|
||||
# Request only specific columns
|
||||
params = {
|
||||
"start": start_ns,
|
||||
"end": end_ns,
|
||||
"format": "csv",
|
||||
"source": "logs",
|
||||
"columns": ["timestamp", "severity_text", "body"],
|
||||
}
|
||||
|
||||
# Export logs with specific columns
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
f"/api/v1/export_raw_data?{urlencode(params, doseq=True)}"
|
||||
),
|
||||
timeout=10,
|
||||
headers={
|
||||
"authorization": f"Bearer {token}",
|
||||
},
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
assert response.headers["Content-Type"] == "text/csv"
|
||||
|
||||
# Parse CSV content
|
||||
csv_content = response.text
|
||||
csv_reader = csv.DictReader(io.StringIO(csv_content))
|
||||
|
||||
rows = list(csv_reader)
|
||||
assert len(rows) == 1
|
||||
|
||||
# Verify the specified columns are present
|
||||
row = rows[0]
|
||||
assert "timestamp" in row
|
||||
assert "severity_text" in row
|
||||
assert "body" in row
|
||||
assert row["severity_text"] == "INFO"
|
||||
assert row["body"] == "Test log message"
|
||||
|
||||
|
||||
def test_export_logs_with_order_by(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: None, # pylint: disable=unused-argument
|
||||
get_token: Callable[[str, str], str],
|
||||
insert_logs: Callable[[List[Logs]], None],
|
||||
) -> None:
|
||||
"""
|
||||
Setup:
|
||||
Insert logs at different timestamps.
|
||||
|
||||
Tests:
|
||||
1. Export logs with ascending timestamp order
|
||||
2. Verify logs are returned in correct order
|
||||
"""
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
insert_logs(
|
||||
[
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=10),
|
||||
body="First log",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"service.name": "test-service",
|
||||
},
|
||||
attributes={},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=5),
|
||||
body="Second log",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"service.name": "test-service",
|
||||
},
|
||||
attributes={},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=1),
|
||||
body="Third log",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"service.name": "test-service",
|
||||
},
|
||||
attributes={},
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
# Calculate timestamps in nanoseconds
|
||||
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
|
||||
end_ns = int(now.timestamp() * 1e9)
|
||||
|
||||
params = {
|
||||
"start": start_ns,
|
||||
"end": end_ns,
|
||||
"format": "jsonl",
|
||||
"source": "logs",
|
||||
"order_by": "timestamp:asc",
|
||||
}
|
||||
|
||||
# Export logs with ascending order
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
f"/api/v1/export_raw_data?{urlencode(params)}"
|
||||
),
|
||||
timeout=10,
|
||||
headers={
|
||||
"authorization": f"Bearer {token}",
|
||||
},
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
assert response.headers["Content-Type"] == "application/x-ndjson"
|
||||
|
||||
# Parse JSONL content
|
||||
jsonl_lines = response.text.strip().split("\n")
|
||||
assert len(jsonl_lines) == 3
|
||||
|
||||
# Verify order - first log should be "First log" (oldest)
|
||||
json_objects = [json.loads(line) for line in jsonl_lines]
|
||||
assert json_objects[0]["body"] == "First log"
|
||||
assert json_objects[1]["body"] == "Second log"
|
||||
assert json_objects[2]["body"] == "Third log"
|
||||
|
||||
|
||||
def test_export_logs_with_complex_filter(
|
||||
signoz: types.SigNoz,
|
||||
create_user_admin: None, # pylint: disable=unused-argument
|
||||
get_token: Callable[[str, str], str],
|
||||
insert_logs: Callable[[List[Logs]], None],
|
||||
) -> None:
|
||||
"""
|
||||
Setup:
|
||||
Insert logs with various service names and severity levels.
|
||||
|
||||
Tests:
|
||||
1. Export logs with complex filter (multiple conditions)
|
||||
2. Verify only logs matching all conditions are returned
|
||||
"""
|
||||
now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
|
||||
|
||||
insert_logs(
|
||||
[
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=10),
|
||||
body="API error occurred",
|
||||
severity_text="ERROR",
|
||||
resources={
|
||||
"service.name": "api-service",
|
||||
},
|
||||
attributes={},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=8),
|
||||
body="Worker info message",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"service.name": "worker-service",
|
||||
},
|
||||
attributes={},
|
||||
),
|
||||
Logs(
|
||||
timestamp=now - timedelta(seconds=5),
|
||||
body="API info message",
|
||||
severity_text="INFO",
|
||||
resources={
|
||||
"service.name": "api-service",
|
||||
},
|
||||
attributes={},
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
|
||||
|
||||
# Calculate timestamps in nanoseconds
|
||||
start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
|
||||
end_ns = int(now.timestamp() * 1e9)
|
||||
|
||||
# Filter for api-service AND ERROR severity
|
||||
params = {
|
||||
"start": start_ns,
|
||||
"end": end_ns,
|
||||
"format": "jsonl",
|
||||
"source": "logs",
|
||||
"filter": "service.name = 'api-service' AND severity_text = 'ERROR'",
|
||||
}
|
||||
|
||||
# Export logs with complex filter
|
||||
response = requests.get(
|
||||
signoz.self.host_configs["8080"].get(
|
||||
f"/api/v1/export_raw_data?{urlencode(params)}"
|
||||
),
|
||||
timeout=10,
|
||||
headers={
|
||||
"authorization": f"Bearer {token}",
|
||||
},
|
||||
)
|
||||
|
||||
assert response.status_code == HTTPStatus.OK
|
||||
assert response.headers["Content-Type"] == "application/x-ndjson"
|
||||
|
||||
# Parse JSONL content
|
||||
jsonl_lines = response.text.strip().split("\n")
|
||||
assert (
|
||||
len(jsonl_lines) == 1
|
||||
), f"Expected 1 line (complex filter), got {len(jsonl_lines)}"
|
||||
|
||||
# Verify the filtered log
|
||||
filtered_obj = json.loads(jsonl_lines[0])
|
||||
assert filtered_obj["body"] == "API error occurred"
|
||||
assert filtered_obj["severity_text"] == "ERROR"
|
||||
tests/integration/src/rawexportdata/02_traces.py (new file, 782 lines)
@@ -0,0 +1,782 @@
import csv
import io
import json
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List
from urllib.parse import urlencode

import requests

from fixtures import types
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.traces import TraceIdGenerator, Traces, TracesKind, TracesStatusCode


def test_export_traces_csv(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert 3 traces with different attributes.

    Tests:
        1. Export traces as CSV format
        2. Verify CSV structure and content
        3. Validate headers are present
        4. Check trace data is correctly formatted
    """
    http_service_trace_id = TraceIdGenerator.trace_id()
    http_service_span_id = TraceIdGenerator.span_id()
    http_service_db_span_id = TraceIdGenerator.span_id()
    topic_service_trace_id = TraceIdGenerator.trace_id()
    topic_service_span_id = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=4),
                duration=timedelta(seconds=3),
                trace_id=http_service_trace_id,
                span_id=http_service_span_id,
                parent_span_id="",
                name="POST /integration",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "http-service",
                    "os.type": "linux",
                    "host.name": "linux-000",
                },
                attributes={
                    "net.transport": "IP.TCP",
                    "http.scheme": "http",
                    "http.user_agent": "Integration Test",
                    "http.request.method": "POST",
                    "http.response.status_code": "200",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=3.5),
                duration=timedelta(seconds=0.5),
                trace_id=http_service_trace_id,
                span_id=http_service_db_span_id,
                parent_span_id=http_service_span_id,
                name="SELECT",
                kind=TracesKind.SPAN_KIND_CLIENT,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "http-service",
                    "os.type": "linux",
                    "host.name": "linux-000",
                },
                attributes={
                    "db.name": "integration",
                    "db.operation": "SELECT",
                    "db.statement": "SELECT * FROM integration",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=1),
                duration=timedelta(seconds=2),
                trace_id=topic_service_trace_id,
                span_id=topic_service_span_id,
                parent_span_id="",
                name="topic publish",
                kind=TracesKind.SPAN_KIND_PRODUCER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "topic-service",
                    "os.type": "linux",
                    "host.name": "linux-001",
                },
                attributes={
                    "message.type": "SENT",
                    "messaging.operation": "publish",
                    "messaging.message.id": "001",
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "source": "traces",
        "limit": 1000,
    }

    # Export traces as CSV (GET for simple queries)
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=30,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "text/csv"
    assert "attachment" in response.headers.get("Content-Disposition", "")

    # Parse CSV content
    csv_content = response.text
    csv_reader = csv.DictReader(io.StringIO(csv_content))

    rows = list(csv_reader)
    assert len(rows) == 3, f"Expected 3 rows, got {len(rows)}"

    # Verify trace IDs are present in the exported data
    trace_ids = [row.get("trace_id") for row in rows]
    assert http_service_trace_id in trace_ids
    assert topic_service_trace_id in trace_ids

    # Verify span names are present
    span_names = [row.get("name") for row in rows]
    assert "POST /integration" in span_names
    assert "SELECT" in span_names
    assert "topic publish" in span_names


def test_export_traces_jsonl(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert 2 traces with different attributes.

    Tests:
        1. Export traces as JSONL format
        2. Verify JSONL structure and content
        3. Check each line is valid JSON
        4. Validate trace data is correctly formatted
    """
    http_service_trace_id = TraceIdGenerator.trace_id()
    http_service_span_id = TraceIdGenerator.span_id()
    topic_service_trace_id = TraceIdGenerator.trace_id()
    topic_service_span_id = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=4),
                duration=timedelta(seconds=3),
                trace_id=http_service_trace_id,
                span_id=http_service_span_id,
                parent_span_id="",
                name="POST /api/test",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "api-service",
                    "deployment.environment": "staging",
                },
                attributes={
                    "http.request.method": "POST",
                    "http.response.status_code": "201",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=2),
                duration=timedelta(seconds=1),
                trace_id=topic_service_trace_id,
                span_id=topic_service_span_id,
                parent_span_id="",
                name="queue.process",
                kind=TracesKind.SPAN_KIND_CONSUMER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "queue-service",
                    "deployment.environment": "staging",
                },
                attributes={
                    "messaging.operation": "process",
                    "messaging.system": "rabbitmq",
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "jsonl",
        "source": "traces",
        "limit": 1000,
    }

    # Export traces as JSONL (GET for simple queries)
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"
    assert "attachment" in response.headers.get("Content-Disposition", "")

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 2, f"Expected 2 lines, got {len(jsonl_lines)}"

    # Verify each line is valid JSON
    json_objects = []
    for line in jsonl_lines:
        obj = json.loads(line)
        json_objects.append(obj)
        assert "trace_id" in obj
        assert "span_id" in obj
        assert "name" in obj

    # Verify trace IDs are present
    trace_ids = [obj.get("trace_id") for obj in json_objects]
    assert http_service_trace_id in trace_ids
    assert topic_service_trace_id in trace_ids

    # Verify span names are present
    span_names = [obj.get("name") for obj in json_objects]
    assert "POST /api/test" in span_names
    assert "queue.process" in span_names


def test_export_traces_with_filter(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert traces with different service names.

    Tests:
        1. Export traces with filter applied
        2. Verify only filtered traces are returned
    """
    service_a_trace_id = TraceIdGenerator.trace_id()
    service_a_span_id = TraceIdGenerator.span_id()
    service_b_trace_id = TraceIdGenerator.trace_id()
    service_b_span_id = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=4),
                duration=timedelta(seconds=1),
                trace_id=service_a_trace_id,
                span_id=service_a_span_id,
                parent_span_id="",
                name="operation-a",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "service-a",
                },
                attributes={},
            ),
            Traces(
                timestamp=now - timedelta(seconds=2),
                duration=timedelta(seconds=1),
                trace_id=service_b_trace_id,
                span_id=service_b_span_id,
                parent_span_id="",
                name="operation-b",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "service-b",
                },
                attributes={},
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "jsonl",
        "source": "traces",
        "limit": 1000,
        "filter": "service.name = 'service-a'",
    }

    # Export traces with filter (GET supports filter param)
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 1, f"Expected 1 line (filtered), got {len(jsonl_lines)}"

    # Verify the filtered trace
    filtered_obj = json.loads(jsonl_lines[0])
    assert filtered_obj["trace_id"] == service_a_trace_id
    assert filtered_obj["name"] == "operation-a"


def test_export_traces_with_limit(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert 5 traces.

    Tests:
        1. Export traces with limit applied
        2. Verify only limited number of traces are returned
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    traces = []
    for i in range(5):
        traces.append(
            Traces(
                timestamp=now - timedelta(seconds=i),
                duration=timedelta(seconds=1),
                trace_id=TraceIdGenerator.trace_id(),
                span_id=TraceIdGenerator.span_id(),
                parent_span_id="",
                name=f"operation-{i}",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "test-service",
                },
                attributes={},
            )
        )

    insert_traces(traces)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    params = {
        "start": start_ns,
        "end": end_ns,
        "format": "csv",
        "source": "traces",
        "limit": 3,
    }

    # Export traces with limit (GET supports limit param)
    response = requests.get(
        signoz.self.host_configs["8080"].get(
            f"/api/v1/export_raw_data?{urlencode(params)}"
        ),
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "text/csv"

    # Parse CSV content
    csv_content = response.text
    csv_reader = csv.DictReader(io.StringIO(csv_content))

    rows = list(csv_reader)
    assert len(rows) == 3, f"Expected 3 rows (limited), got {len(rows)}"


def test_export_traces_multiple_queries_rejected(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
) -> None:
    """
    Tests:
        1. POST with multiple builder queries but no trace operator is rejected
        2. Verify 400 error is returned
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    body = {
        "start": start_ns,
        "end": end_ns,
        "compositeQuery": {
            "queries": [
                {
                    "type": "builder_query",
                    "spec": {
                        "signal": "traces",
                        "name": "A",
                        "limit": 1000,
                        "filter": {"expression": "service.name = 'service-a'"},
                    },
                },
                {
                    "type": "builder_query",
                    "spec": {
                        "signal": "traces",
                        "name": "B",
                        "limit": 1000,
                        "filter": {"expression": "service.name = 'service-b'"},
                    },
                },
            ]
        },
    }

    url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
    response = requests.post(
        url,
        json=body,
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        },
    )

    assert response.status_code == HTTPStatus.BAD_REQUEST
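
    # For contrast, a minimal sketch (assumption inferred from this test's docstring):
    # a body with a single builder query and no trace operator would be the accepted
    # shape; only multiple builder queries without a trace operator are rejected.
    single_query_body = {
        "start": start_ns,
        "end": end_ns,
        "requestType": "raw",
        "compositeQuery": {
            "queries": [
                {
                    "type": "builder_query",
                    "spec": {
                        "signal": "traces",
                        "name": "A",
                        "limit": 1000,
                        "filter": {"expression": "service.name = 'service-a'"},
                    },
                }
            ]
        },
    }
    assert len(single_query_body["compositeQuery"]["queries"]) == 1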


def test_export_traces_with_composite_query_trace_operator(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert multiple traces with parent-child relationships.

    Tests:
        1. Export traces using trace operator in composite query (POST)
        2. Verify trace operator query works correctly
    """
    parent_trace_id = TraceIdGenerator.trace_id()
    parent_span_id = TraceIdGenerator.span_id()
    child_span_id_1 = TraceIdGenerator.span_id()
    child_span_id_2 = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=10),
                duration=timedelta(seconds=5),
                trace_id=parent_trace_id,
                span_id=parent_span_id,
                parent_span_id="",
                name="parent-operation",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "parent-service",
                },
                attributes={
                    "operation.type": "parent",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=9),
                duration=timedelta(seconds=2),
                trace_id=parent_trace_id,
                span_id=child_span_id_1,
                parent_span_id=parent_span_id,
                name="child-operation-1",
                kind=TracesKind.SPAN_KIND_INTERNAL,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "parent-service",
                },
                attributes={
                    "operation.type": "child",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=7),
                duration=timedelta(seconds=1),
                trace_id=parent_trace_id,
                span_id=child_span_id_2,
                parent_span_id=parent_span_id,
                name="child-operation-2",
                kind=TracesKind.SPAN_KIND_INTERNAL,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "parent-service",
                },
                attributes={
                    "operation.type": "child",
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    # A: spans with operation.type = 'parent'
    query_a = {
        "type": "builder_query",
        "spec": {
            "signal": "traces",
            "name": "A",
            "limit": 1000,
            "filter": {"expression": "operation.type = 'parent'"},
        },
    }

    # B: spans with operation.type = 'child'
    query_b = {
        "type": "builder_query",
        "spec": {
            "signal": "traces",
            "name": "B",
            "limit": 1000,
            "filter": {"expression": "operation.type = 'child'"},
        },
    }

    # Trace operator: find traces where A has a direct descendant B
    query_c = {
        "type": "builder_trace_operator",
        "spec": {
            "name": "C",
            "expression": "A => B",
            "returnSpansFrom": "A",
            "limit": 1000,
            "order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
        },
    }

    body = {
        "start": start_ns,
        "end": end_ns,
        "requestType": "raw",
        "compositeQuery": {
            "queries": [query_a, query_b, query_c],
        },
    }

    url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
    response = requests.post(
        url,
        json=body,
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 1, f"Expected 1 line, got {len(jsonl_lines)}"

    # Verify all returned spans belong to the matched trace
    json_objects = [json.loads(line) for line in jsonl_lines]
    trace_ids = [obj.get("trace_id") for obj in json_objects]
    assert all(tid == parent_trace_id for tid in trace_ids)

    # Verify the parent span (returnSpansFrom = "A") is present
    span_names = [obj.get("name") for obj in json_objects]
    assert "parent-operation" in span_names


def test_export_traces_with_select_fields(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
) -> None:
    """
    Setup:
        Insert traces with various attributes.

    Tests:
        1. Export traces with specific select fields via POST
        2. Verify only specified fields are returned in the output
    """
    trace_id = TraceIdGenerator.trace_id()
    span_id = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=10),
                duration=timedelta(seconds=2),
                trace_id=trace_id,
                span_id=span_id,
                parent_span_id="",
                name="test-operation",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "service.name": "test-service",
                    "deployment.environment": "production",
                    "host.name": "server-01",
                },
                attributes={
                    "http.method": "POST",
                    "http.status_code": "201",
                    "user.id": "user123",
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    # Calculate timestamps in nanoseconds
    start_ns = int((now - timedelta(minutes=5)).timestamp() * 1e9)
    end_ns = int(now.timestamp() * 1e9)

    body = {
        "start": start_ns,
        "end": end_ns,
        "requestType": "raw",
        "compositeQuery": {
            "queries": [
                {
                    "type": "builder_query",
                    "spec": {
                        "signal": "traces",
                        "name": "A",
                        "limit": 1000,
                        "selectFields": [
                            {
                                "name": "trace_id",
                                "fieldDataType": "string",
                                "fieldContext": "span",
                                "signal": "traces",
                            },
                            {
                                "name": "span_id",
                                "fieldDataType": "string",
                                "fieldContext": "span",
                                "signal": "traces",
                            },
                            {
                                "name": "name",
                                "fieldDataType": "string",
                                "fieldContext": "span",
                                "signal": "traces",
                            },
                            {
                                "name": "service.name",
                                "fieldDataType": "string",
                                "fieldContext": "resource",
                                "signal": "traces",
                            },
                        ],
                    },
                }
            ]
        },
    }

    url = signoz.self.host_configs["8080"].get("/api/v1/export_raw_data?format=jsonl")
    response = requests.post(
        url,
        json=body,
        timeout=10,
        headers={
            "authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        },
    )

    assert response.status_code == HTTPStatus.OK
    assert response.headers["Content-Type"] == "application/x-ndjson"

    # Parse JSONL content
    jsonl_lines = response.text.strip().split("\n")
    assert len(jsonl_lines) == 1

    # Verify the selected fields are present
    result = json.loads(jsonl_lines[0])
    assert "trace_id" in result
    assert "span_id" in result
    assert "name" in result

    # Verify values
    assert result["trace_id"] == trace_id
    assert result["span_id"] == span_id
    assert result["name"] == "test-operation"