Compare commits

..

8 Commits

Author SHA1 Message Date
nityanandagohain
445d3e8c3e fix: update validation for select and group by 2025-10-10 14:41:10 +05:30
primus-bot[bot]
6c59b5405e chore(release): bump to v0.97.0 (#9305)
Co-authored-by: primus-bot[bot] <171087277+primus-bot[bot]@users.noreply.github.com>
2025-10-10 12:19:35 +05:30
Chitransh
d26b57b0d8 feat: added new datasources (#9167)
* feat: added new datasources

* fix: added new datasource
2025-10-09 08:46:14 +00:00
Aditya Singh
da17375f10 Preferences framework re-factor (#9206)
* fix: logs popover content logic extracted out

* fix: logs popover content in live view

* fix: destory popover on close

* feat: add logs format tests

* feat: minor refactor

* feat: test case refactor

* feat: remove menu refs in logs live view

* feat: globalise Preference context and remove async logic

* feat: change preference context state structure to support both logs and traces pref

* feat: test refactor
2025-10-09 04:40:52 +00:00
Vikrant Gupta
a96489d06e feat(authz): address tenant isolation for authz (#9293)
* feat(authz): address tenant isolation for authz

* feat(authz): handle role module self registry

* feat(authz): keep role / user / resource sync in naming

* feat(authz): rename orgId to orgID

* feat(authz): add the missing / for user

* feat(authz): remove embedding for pkgopenfgaauthz service
2025-10-08 17:04:00 +00:00
Nityananda Gohain
8c29debb52 fix: use numerical comparison instead of lexicographical for string-encoded numbers (#9154)
* fix: let clickhouse handle string to number conversion

* fix: ignore casting if it's a comparison operator for number key

* fix: add integration tests

* fix: update comments

* fix: convert only if it's actually not a integrer with comparison operator

* fix: force convert to float when number

* fix: integration tests

* fix: correct the comment

* fix: update comment

---------

Co-authored-by: Srikanth Chekuri <srikanth.chekuri92@gmail.com>
2025-10-08 14:21:40 +05:30
Srikanth Chekuri
9cebd49a2c fix: anomaly with below operator negates the target (#9288) 2025-10-08 12:11:31 +05:30
Shaheer Kochai
a22ef64bb0 fix: fix the flaky test (#9255)
* fix: fix the flaky test

* chore: remove unnecessary changes
2025-10-06 22:02:12 +05:30
91 changed files with 2304 additions and 1696 deletions

View File

@@ -42,7 +42,7 @@ services:
timeout: 5s
retries: 3
schema-migrator-sync:
image: signoz/signoz-schema-migrator:v0.129.6
image: signoz/signoz-schema-migrator:v0.129.7
container_name: schema-migrator-sync
command:
- sync
@@ -55,7 +55,7 @@ services:
condition: service_healthy
restart: on-failure
schema-migrator-async:
image: signoz/signoz-schema-migrator:v0.129.6
image: signoz/signoz-schema-migrator:v0.129.7
container_name: schema-migrator-async
command:
- async

View File

@@ -176,7 +176,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.96.1
image: signoz/signoz:v0.97.0
command:
- --config=/root/config/prometheus.yml
ports:
@@ -209,7 +209,7 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.129.6
image: signoz/signoz-otel-collector:v0.129.7
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
@@ -233,7 +233,7 @@ services:
- signoz
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:v0.129.6
image: signoz/signoz-schema-migrator:v0.129.7
deploy:
restart_policy:
condition: on-failure

View File

@@ -117,7 +117,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:v0.96.1
image: signoz/signoz:v0.97.0
command:
- --config=/root/config/prometheus.yml
ports:
@@ -150,7 +150,7 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:v0.129.6
image: signoz/signoz-otel-collector:v0.129.7
command:
- --config=/etc/otel-collector-config.yaml
- --manager-config=/etc/manager-config.yaml
@@ -176,7 +176,7 @@ services:
- signoz
schema-migrator:
!!merge <<: *common
image: signoz/signoz-schema-migrator:v0.129.6
image: signoz/signoz-schema-migrator:v0.129.7
deploy:
restart_policy:
condition: on-failure

View File

@@ -179,7 +179,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.96.1}
image: signoz/signoz:${VERSION:-v0.97.0}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
@@ -213,7 +213,7 @@ services:
# TODO: support otel-collector multiple replicas. Nginx/Traefik for loadbalancing?
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.6}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.7}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -239,7 +239,7 @@ services:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.7}
container_name: schema-migrator-sync
command:
- sync
@@ -250,7 +250,7 @@ services:
condition: service_healthy
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.7}
container_name: schema-migrator-async
command:
- async

View File

@@ -111,7 +111,7 @@ services:
# - ../common/clickhouse/storage.xml:/etc/clickhouse-server/config.d/storage.xml
signoz:
!!merge <<: *db-depend
image: signoz/signoz:${VERSION:-v0.96.1}
image: signoz/signoz:${VERSION:-v0.97.0}
container_name: signoz
command:
- --config=/root/config/prometheus.yml
@@ -144,7 +144,7 @@ services:
retries: 3
otel-collector:
!!merge <<: *db-depend
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.6}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-v0.129.7}
container_name: signoz-otel-collector
command:
- --config=/etc/otel-collector-config.yaml
@@ -166,7 +166,7 @@ services:
condition: service_healthy
schema-migrator-sync:
!!merge <<: *common
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.7}
container_name: schema-migrator-sync
command:
- sync
@@ -178,7 +178,7 @@ services:
restart: on-failure
schema-migrator-async:
!!merge <<: *db-depend
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.6}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-v0.129.7}
container_name: schema-migrator-async
command:
- async

View File

@@ -232,7 +232,7 @@ func (p *BaseSeasonalProvider) getPredictedSeries(
// moving avg of the previous period series + z score threshold * std dev of the series
// moving avg of the previous period series - z score threshold * std dev of the series
func (p *BaseSeasonalProvider) getBounds(
series, predictedSeries *qbtypes.TimeSeries,
series, predictedSeries, weekSeries *qbtypes.TimeSeries,
zScoreThreshold float64,
) (*qbtypes.TimeSeries, *qbtypes.TimeSeries) {
upperBoundSeries := &qbtypes.TimeSeries{
@@ -246,8 +246,8 @@ func (p *BaseSeasonalProvider) getBounds(
}
for idx, curr := range series.Values {
upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(series)
lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(series)
upperBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) + zScoreThreshold*p.getStdDev(weekSeries)
lowerBound := p.getMovingAvg(predictedSeries, movingAvgWindowSize, idx) - zScoreThreshold*p.getStdDev(weekSeries)
upperBoundSeries.Values = append(upperBoundSeries.Values, &qbtypes.TimeSeriesValue{
Timestamp: curr.Timestamp,
Value: upperBound,
@@ -398,8 +398,6 @@ func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UU
aggOfInterest := result.Aggregations[0]
for _, series := range aggOfInterest.Series {
stdDev := p.getStdDev(series)
p.logger.InfoContext(ctx, "calculated standard deviation for series", "anomaly_std_dev", stdDev, "anomaly_labels", series.Labels)
pastPeriodSeries := p.getMatchingSeries(ctx, pastPeriodResult, series)
currentSeasonSeries := p.getMatchingSeries(ctx, currentSeasonResult, series)
@@ -407,6 +405,9 @@ func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UU
past2SeasonSeries := p.getMatchingSeries(ctx, past2SeasonResult, series)
past3SeasonSeries := p.getMatchingSeries(ctx, past3SeasonResult, series)
stdDev := p.getStdDev(currentSeasonSeries)
p.logger.InfoContext(ctx, "calculated standard deviation for series", "anomaly_std_dev", stdDev, "anomaly_labels", series.Labels)
prevSeriesAvg := p.getAvg(pastPeriodSeries)
currentSeasonSeriesAvg := p.getAvg(currentSeasonSeries)
pastSeasonSeriesAvg := p.getAvg(pastSeasonSeries)
@@ -435,6 +436,7 @@ func (p *BaseSeasonalProvider) getAnomalies(ctx context.Context, orgID valuer.UU
upperBoundSeries, lowerBoundSeries := p.getBounds(
series,
predictedSeries,
currentSeasonSeries,
zScoreThreshold,
)
aggOfInterest.UpperBoundSeries = append(aggOfInterest.UpperBoundSeries, upperBoundSeries)

View File

@@ -0,0 +1,79 @@
package openfgaauthz
import (
"context"
"github.com/SigNoz/signoz/pkg/authz"
pkgopenfgaauthz "github.com/SigNoz/signoz/pkg/authz/openfgaauthz"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/sqlstore"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
openfgapkgtransformer "github.com/openfga/language/pkg/go/transformer"
)
type provider struct {
pkgAuthzService authz.AuthZ
}
func NewProviderFactory(sqlstore sqlstore.SQLStore, openfgaSchema []openfgapkgtransformer.ModuleFile) factory.ProviderFactory[authz.AuthZ, authz.Config] {
return factory.NewProviderFactory(factory.MustNewName("openfga"), func(ctx context.Context, ps factory.ProviderSettings, config authz.Config) (authz.AuthZ, error) {
return newOpenfgaProvider(ctx, ps, config, sqlstore, openfgaSchema)
})
}
func newOpenfgaProvider(ctx context.Context, settings factory.ProviderSettings, config authz.Config, sqlstore sqlstore.SQLStore, openfgaSchema []openfgapkgtransformer.ModuleFile) (authz.AuthZ, error) {
pkgOpenfgaAuthzProvider := pkgopenfgaauthz.NewProviderFactory(sqlstore, openfgaSchema)
pkgAuthzService, err := pkgOpenfgaAuthzProvider.New(ctx, settings, config)
if err != nil {
return nil, err
}
return &provider{
pkgAuthzService: pkgAuthzService,
}, nil
}
func (provider *provider) Start(ctx context.Context) error {
return provider.pkgAuthzService.Start(ctx)
}
func (provider *provider) Stop(ctx context.Context) error {
return provider.pkgAuthzService.Stop(ctx)
}
func (provider *provider) Check(ctx context.Context, tuple *openfgav1.TupleKey) error {
return provider.pkgAuthzService.Check(ctx, tuple)
}
func (provider *provider) CheckWithTupleCreation(ctx context.Context, claims authtypes.Claims, orgID valuer.UUID, relation authtypes.Relation, _ authtypes.Relation, typeable authtypes.Typeable, selectors []authtypes.Selector) error {
subject, err := authtypes.NewSubject(authtypes.TypeUser, claims.UserID, authtypes.Relation{})
if err != nil {
return err
}
tuples, err := typeable.Tuples(subject, relation, selectors, orgID)
if err != nil {
return err
}
err = provider.BatchCheck(ctx, tuples)
if err != nil {
return err
}
return nil
}
func (provider *provider) BatchCheck(ctx context.Context, tuples []*openfgav1.TupleKey) error {
return provider.pkgAuthzService.BatchCheck(ctx, tuples)
}
func (provider *provider) ListObjects(ctx context.Context, subject string, relation authtypes.Relation, typeable authtypes.Typeable) ([]*authtypes.Object, error) {
return provider.pkgAuthzService.ListObjects(ctx, subject, relation, typeable)
}
func (provider *provider) Write(ctx context.Context, additions []*openfgav1.TupleKey, deletions []*openfgav1.TupleKey) error {
return provider.pkgAuthzService.Write(ctx, additions, deletions)
}

View File

@@ -1,132 +0,0 @@
package middleware
import (
"log/slog"
"net/http"
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/gorilla/mux"
)
const (
authzDeniedMessage string = "::AUTHZ-DENIED::"
)
type AuthZ struct {
logger *slog.Logger
authzService authz.AuthZ
}
func NewAuthZ(logger *slog.Logger) *AuthZ {
if logger == nil {
panic("cannot build authz middleware, logger is empty")
}
return &AuthZ{logger: logger}
}
func (middleware *AuthZ) ViewAccess(next http.HandlerFunc) http.HandlerFunc {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
render.Error(rw, err)
return
}
if err := claims.IsViewer(); err != nil {
middleware.logger.WarnContext(req.Context(), authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}
next(rw, req)
})
}
func (middleware *AuthZ) EditAccess(next http.HandlerFunc) http.HandlerFunc {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
render.Error(rw, err)
return
}
if err := claims.IsEditor(); err != nil {
middleware.logger.WarnContext(req.Context(), authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}
next(rw, req)
})
}
func (middleware *AuthZ) AdminAccess(next http.HandlerFunc) http.HandlerFunc {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
render.Error(rw, err)
return
}
if err := claims.IsAdmin(); err != nil {
middleware.logger.WarnContext(req.Context(), authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}
next(rw, req)
})
}
func (middleware *AuthZ) SelfAccess(next http.HandlerFunc) http.HandlerFunc {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
render.Error(rw, err)
return
}
id := mux.Vars(req)["id"]
if err := claims.IsSelfAccess(id); err != nil {
middleware.logger.WarnContext(req.Context(), authzDeniedMessage, "claims", claims)
render.Error(rw, err)
return
}
next(rw, req)
})
}
func (middleware *AuthZ) OpenAccess(next http.HandlerFunc) http.HandlerFunc {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
next(rw, req)
})
}
// Check middleware accepts the relation, typeable, parentTypeable (for direct access + group relations) and a callback function to derive selector and parentSelectors on per request basis.
func (middleware *AuthZ) Check(next http.HandlerFunc, relation authtypes.Relation, translation authtypes.Relation, typeable authtypes.Typeable, cb authtypes.SelectorCallbackFn) http.HandlerFunc {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
render.Error(rw, err)
return
}
selector, err := cb(req.Context(), claims)
if err != nil {
render.Error(rw, err)
return
}
err = middleware.authzService.CheckWithTupleCreation(req.Context(), claims, relation, typeable, selector)
if err != nil {
render.Error(rw, err)
return
}
next(rw, req)
})
}

View File

@@ -78,11 +78,6 @@ func NewAnomalyRule(
opts = append(opts, baserules.WithLogger(logger))
if p.RuleCondition.CompareOp == ruletypes.ValueIsBelow {
target := -1 * *p.RuleCondition.Target
p.RuleCondition.Target = &target
}
baseRule, err := baserules.NewBaseRule(id, orgID, p, reader, opts...)
if err != nil {
return nil, err

View File

@@ -1,8 +1,9 @@
import { Page } from '@playwright/test';
// Read credentials from environment variables
const username = process.env.SIGNOZ_E2E_USERNAME;
const password = process.env.SIGNOZ_E2E_PASSWORD;
const username = process.env.LOGIN_USERNAME;
const password = process.env.LOGIN_PASSWORD;
const baseURL = process.env.BASE_URL;
/**
* Ensures the user is logged in. If not, performs the login steps.
@@ -10,17 +11,17 @@ const password = process.env.SIGNOZ_E2E_PASSWORD;
*/
export async function ensureLoggedIn(page: Page): Promise<void> {
// if already in home page, return
if (page.url().includes('/home')) {
if (await page.url().includes('/home')) {
return;
}
if (!username || !password) {
throw new Error(
'SIGNOZ_E2E_USERNAME and SIGNOZ_E2E_PASSWORD environment variables must be set.',
'E2E_EMAIL and E2E_PASSWORD environment variables must be set.',
);
}
await page.goto('/login');
await page.goto(`${baseURL}/login`);
await page.getByTestId('email').click();
await page.getByTestId('email').fill(username);
await page.getByTestId('initiate_login').click();

View File

@@ -4,9 +4,4 @@ FRONTEND_API_ENDPOINT="http://localhost:8080/"
PYLON_APP_ID="pylon-app-id"
APPCUES_APP_ID="appcess-app-id"
CI="1"
# Playwright E2E Test Configuration
SIGNOZ_E2E_BASE_URL="your-dev-environment-url"
SIGNOZ_E2E_USERNAME="your-email@example.com"
SIGNOZ_E2E_PASSWORD="your-password"
CI="1"

View File

@@ -18,12 +18,7 @@
"husky:configure": "cd .. && husky install frontend/.husky && cd frontend && chmod ug+x .husky/*",
"commitlint": "commitlint --edit $1",
"test": "jest",
"test:changedsince": "jest --changedSince=main --coverage --silent",
"e2e": "playwright test",
"e2e:ui": "playwright test --ui",
"e2e:headed": "playwright test --headed",
"e2e:debug": "playwright test --debug",
"e2e:report": "playwright show-report"
"test:changedsince": "jest --changedSince=main --coverage --silent"
},
"engines": {
"node": ">=16.15.0"

View File

@@ -45,7 +45,14 @@ export default defineConfig({
projects: [
{
name: 'chromium',
use: { ...devices['Desktop Chrome'] },
use: {
launchOptions: { args: ['--start-maximized'] },
viewport: null,
colorScheme: 'dark',
locale: 'en-US',
baseURL: 'https://app.us.staging.signoz.cloud',
trace: 'on-first-retry',
},
},
{

View File

@@ -0,0 +1,16 @@
<svg version="1.1" id="Layer_1" xmlns:x="ns_extend;" xmlns:i="ns_ai;" xmlns:graph="ns_graphs;" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" viewBox="0 0 92.2 65" style="enable-background:new 0 0 92.2 65;" xml:space="preserve">
<style type="text/css">
.st0{fill:#FFFFFF;}
</style>
<metadata>
<sfw xmlns="ns_sfw;">
<slices>
</slices>
<sliceSourceBounds bottomLeftOrigin="true" height="65" width="92.2" x="-43.7" y="-98">
</sliceSourceBounds>
</sfw>
</metadata>
<path class="st0" d="M66.5,0H52.4l25.7,65h14.1L66.5,0z M25.7,0L0,65h14.4l5.3-13.6h26.9L51.8,65h14.4L40.5,0C40.5,0,25.7,0,25.7,0z
M24.3,39.3l8.8-22.8l8.8,22.8H24.3z">
</path>
</svg>

After

Width:  |  Height:  |  Size: 714 B

View File

@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Claude</title><path d="M4.709 15.955l4.72-2.647.08-.23-.08-.128H9.2l-.79-.048-2.698-.073-2.339-.097-2.266-.122-.571-.121L0 11.784l.055-.352.48-.321.686.06 1.52.103 2.278.158 1.652.097 2.449.255h.389l.055-.157-.134-.098-.103-.097-2.358-1.596-2.552-1.688-1.336-.972-.724-.491-.364-.462-.158-1.008.656-.722.881.06.225.061.893.686 1.908 1.476 2.491 1.833.365.304.145-.103.019-.073-.164-.274-1.355-2.446-1.446-2.49-.644-1.032-.17-.619a2.97 2.97 0 01-.104-.729L6.283.134 6.696 0l.996.134.42.364.62 1.414 1.002 2.229 1.555 3.03.456.898.243.832.091.255h.158V9.01l.128-1.706.237-2.095.23-2.695.08-.76.376-.91.747-.492.584.28.48.685-.067.444-.286 1.851-.559 2.903-.364 1.942h.212l.243-.242.985-1.306 1.652-2.064.73-.82.85-.904.547-.431h1.033l.76 1.129-.34 1.166-1.064 1.347-.881 1.142-1.264 1.7-.79 1.36.073.11.188-.02 2.856-.606 1.543-.28 1.841-.315.833.388.091.395-.328.807-1.969.486-2.309.462-3.439.813-.042.03.049.061 1.549.146.662.036h1.622l3.02.225.79.522.474.638-.079.485-1.215.62-1.64-.389-3.829-.91-1.312-.329h-.182v.11l1.093 1.068 2.006 1.81 2.509 2.33.127.578-.322.455-.34-.049-2.205-1.657-.851-.747-1.926-1.62h-.128v.17l.444.649 2.345 3.521.122 1.08-.17.353-.608.213-.668-.122-1.374-1.925-1.415-2.167-1.143-1.943-.14.08-.674 7.254-.316.37-.729.28-.607-.461-.322-.747.322-1.476.389-1.924.315-1.53.286-1.9.17-.632-.012-.042-.14.018-1.434 1.967-2.18 2.945-1.726 1.845-.414.164-.717-.37.067-.662.401-.589 2.388-3.036 1.44-1.882.93-1.086-.006-.158h-.055L4.132 18.56l-1.13.146-.487-.456.061-.746.231-.243 1.908-1.312-.006.006z" fill="#D97757" fill-rule="nonzero"></path></svg>

After

Width:  |  Height:  |  Size: 1.7 KiB

View File

@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>DeepSeek</title><path d="M23.748 4.482c-.254-.124-.364.113-.512.234-.051.039-.094.09-.137.136-.372.397-.806.657-1.373.626-.829-.046-1.537.214-2.163.848-.133-.782-.575-1.248-1.247-1.548-.352-.156-.708-.311-.955-.65-.172-.241-.219-.51-.305-.774-.055-.16-.11-.323-.293-.35-.2-.031-.278.136-.356.276-.313.572-.434 1.202-.422 1.84.027 1.436.633 2.58 1.838 3.393.137.093.172.187.129.323-.082.28-.18.552-.266.833-.055.179-.137.217-.329.14a5.526 5.526 0 01-1.736-1.18c-.857-.828-1.631-1.742-2.597-2.458a11.365 11.365 0 00-.689-.471c-.985-.957.13-1.743.388-1.836.27-.098.093-.432-.779-.428-.872.004-1.67.295-2.687.684a3.055 3.055 0 01-.465.137 9.597 9.597 0 00-2.883-.102c-1.885.21-3.39 1.102-4.497 2.623C.082 8.606-.231 10.684.152 12.85c.403 2.284 1.569 4.175 3.36 5.653 1.858 1.533 3.997 2.284 6.438 2.14 1.482-.085 3.133-.284 4.994-1.86.47.234.962.327 1.78.397.63.059 1.236-.03 1.705-.128.735-.156.684-.837.419-.961-2.155-1.004-1.682-.595-2.113-.926 1.096-1.296 2.746-2.642 3.392-7.003.05-.347.007-.565 0-.845-.004-.17.035-.237.23-.256a4.173 4.173 0 001.545-.475c1.396-.763 1.96-2.015 2.093-3.517.02-.23-.004-.467-.247-.588zM11.581 18c-2.089-1.642-3.102-2.183-3.52-2.16-.392.024-.321.471-.235.763.09.288.207.486.371.739.114.167.192.416-.113.603-.673.416-1.842-.14-1.897-.167-1.361-.802-2.5-1.86-3.301-3.307-.774-1.393-1.224-2.887-1.298-4.482-.02-.386.093-.522.477-.592a4.696 4.696 0 011.529-.039c2.132.312 3.946 1.265 5.468 2.774.868.86 1.525 1.887 2.202 2.891.72 1.066 1.494 2.082 2.48 2.914.348.292.625.514.891.677-.802.09-2.14.11-3.054-.614zm1-6.44a.306.306 0 01.415-.287.302.302 0 01.2.288.306.306 0 01-.31.307.303.303 0 01-.304-.308zm3.11 1.596c-.2.081-.399.151-.59.16a1.245 1.245 0 01-.798-.254c-.274-.23-.47-.358-.552-.758a1.73 1.73 0 01.016-.588c.07-.327-.008-.537-.239-.727-.187-.156-.426-.199-.688-.199a.559.559 0 
01-.254-.078c-.11-.054-.2-.19-.114-.358.028-.054.16-.186.192-.21.356-.202.767-.136 1.146.016.352.144.618.408 1.001.782.391.451.462.576.685.914.176.265.336.537.445.848.067.195-.019.354-.25.452z" fill="#4D6BFE"></path></svg>

After

Width:  |  Height:  |  Size: 2.1 KiB

View File

@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>Gemini</title><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="#3186FF"></path><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="url(#lobe-icons-gemini-fill-0)"></path><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="url(#lobe-icons-gemini-fill-1)"></path><path d="M20.616 10.835a14.147 14.147 0 01-4.45-3.001 14.111 14.111 0 01-3.678-6.452.503.503 0 00-.975 0 14.134 14.134 0 01-3.679 6.452 14.155 14.155 0 01-4.45 3.001c-.65.28-1.318.505-2.002.678a.502.502 0 000 .975c.684.172 1.35.397 2.002.677a14.147 14.147 0 014.45 3.001 14.112 14.112 0 013.679 6.453.502.502 0 00.975 0c.172-.685.397-1.351.677-2.003a14.145 14.145 0 
013.001-4.45 14.113 14.113 0 016.453-3.678.503.503 0 000-.975 13.245 13.245 0 01-2.003-.678z" fill="url(#lobe-icons-gemini-fill-2)"></path><defs><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-gemini-fill-0" x1="7" x2="11" y1="15.5" y2="12"><stop stop-color="#08B962"></stop><stop offset="1" stop-color="#08B962" stop-opacity="0"></stop></linearGradient><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-gemini-fill-1" x1="8" x2="11.5" y1="5.5" y2="11"><stop stop-color="#F94543"></stop><stop offset="1" stop-color="#F94543" stop-opacity="0"></stop></linearGradient><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-gemini-fill-2" x1="3.5" x2="17.5" y1="13.5" y2="12"><stop stop-color="#FABC12"></stop><stop offset=".46" stop-color="#FABC12" stop-opacity="0"></stop></linearGradient></defs></svg>

After

Width:  |  Height:  |  Size: 2.8 KiB

View File

@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>LangChain</title><path d="M8.373 14.502c.013-.06.024-.118.038-.17l.061.145c.115.28.229.557.506.714-.012.254-.334.357-.552.326-.048-.114-.115-.228-.255-.164-.143.056-.3-.01-.266-.185.333-.012.407-.371.468-.666zM18.385 9.245c-.318 0-.616.122-.839.342l-.902.887c-.243.24-.368.572-.343.913l.006.056c.032.262.149.498.337.682.13.128.273.21.447.266a.866.866 0 01-.247.777l-.056.055a2.022 2.022 0 01-1.355-1.555l-.01-.057-.046.037c-.03.024-.06.05-.088.078l-.902.887a1.156 1.156 0 000 1.65c.231.228.535.342.84.342.304 0 .607-.114.838-.341l.902-.888a1.156 1.156 0 00-.436-1.921.953.953 0 01.276-.842 2.062 2.062 0 011.371 1.57l.01.057.047-.037c.03-.024.06-.05.088-.078l.902-.888a1.155 1.155 0 000-1.65 1.188 1.188 0 00-.84-.342z" fill="#1C3C3C"></path><path clip-rule="evenodd" d="M17.901 6H6.1C2.736 6 0 8.692 0 12s2.736 6 6.099 6H17.9C21.264 18 24 15.308 24 12s-2.736-6-6.099-6zm-5.821 9.407c-.195.04-.414.047-.562-.106-.045.1-.136.077-.221.056a.797.797 0 00-.061-.014c-.01.025-.017.048-.026.073-.329.021-.575-.309-.732-.558a4.991 4.991 0 00-.473-.21c-.172-.07-.345-.14-.509-.23a2.218 2.218 0 00-.004.173c-.002.244-.004.503-.227.651-.007.295.236.292.476.29.207-.003.41-.005.447.184a.485.485 0 01-.05.003c-.046 0-.092 0-.127.034-.117.111-.242.063-.372.013-.12-.046-.243-.094-.367-.02a2.318 2.318 0 00-.262.154.97.97 0 01-.548.194c-.024-.036-.014-.059.006-.08a.562.562 0 00.043-.056c.019-.028.035-.057.051-.084.054-.095.103-.18.242-.22-.185-.029-.344.055-.5.137l-.004.002a4.21 4.21 0 01-.065.034c-.097.04-.154.009-.212-.023-.082-.045-.168-.092-.376.04-.04-.032-.02-.061.002-.086.091-.109.21-.125.345-.119-.351-.193-.604-.056-.81.055-.182.098-.327.176-.471-.012-.065.017-.102.063-.138.108-.015.02-.03.038-.047.055-.035-.039-.027-.083-.018-.128l.005-.026a.242.242 0 00.003-.03l-.027-.01c-.053-.022-.105-.044-.09-.124-.117-.04-.2.03-.286.094-.054-.041-.01-.095.032-.145a.279.279 0 
00.045-.065c.038-.065.103-.067.166-.069.054-.001.108-.003.145-.042.133-.075.297-.036.462.003.121.028.242.057.354.042.203.025.454-.18.352-.385-.186-.233-.184-.528-.183-.813v-.143c-.016-.108-.172-.233-.328-.358-.12-.095-.24-.191-.298-.28-.16-.177-.285-.382-.409-.585l-.015-.024c-.212-.404-.297-.86-.382-1.315-.103-.546-.205-1.09-.526-1.54-.266.144-.612.075-.841-.118-.12.107-.13.247-.138.396l-.001.014c-.297-.292-.26-.844-.023-1.17.097-.128.213-.233.342-.326.03-.021.04-.042.039-.074.235-1.04 1.836-.839 2.342-.103.167.206.281.442.395.678.137.283.273.566.5.795.22.237.452.463.684.689.359.35.718.699 1.032 1.089.49.587.839 1.276 1.144 1.97.05.092.08.193.11.293.044.15.089.299.2.417.026.035.084.088.149.148.156.143.357.328.289.409.009.019.027.04.05.06.032.028.074.058.116.088.122.087.25.178.16.25zm7.778-3.545l-.902.887c-.24.237-.537.413-.859.51l-.017.005-.006.015A2.021 2.021 0 0117.6 14l-.902.888c-.393.387-.916.6-1.474.6-.557 0-1.08-.213-1.474-.6a2.03 2.03 0 010-2.9l.902-.888c.242-.238.531-.409.859-.508l.016-.004.006-.016c.105-.272.265-.516.475-.724l.902-.887c.393-.387.917-.6 1.474-.6.558 0 1.08.213 1.474.6.394.387.61.902.61 1.45 0 .549-.216 1.064-.61 1.45v.001z" fill="#1C3C3C" fill-rule="evenodd"></path></svg>

After

Width:  |  Height:  |  Size: 3.1 KiB

View File

@@ -0,0 +1 @@
<svg height="1em" style="flex:none;line-height:1" viewBox="0 0 24 24" width="1em" xmlns="http://www.w3.org/2000/svg"><title>LlamaIndex</title><path d="M15.855 17.122c-2.092.924-4.358.545-5.23.24 0 .21-.01.857-.048 1.78-.038.924-.332 1.507-.475 1.684.016.577.029 1.837-.047 2.26a1.93 1.93 0 01-.476.914H8.295c.114-.577.555-.946.761-1.058.114-1.193-.11-2.229-.238-2.597-.126.449-.437 1.49-.665 2.068a6.418 6.418 0 01-.713 1.299h-.951c-.048-.578.27-.77.475-.77.095-.177.323-.731.476-1.54.152-.807-.064-2.324-.19-2.981v-2.068c-1.522-.818-2.092-1.636-2.473-2.55-.304-.73-.222-1.843-.142-2.308-.096-.176-.373-.625-.476-1.25-.142-.866-.063-1.491 0-1.828-.095-.096-.285-.587-.285-1.78 0-1.192.349-1.811.523-1.972v-.529c-.666-.048-1.331-.336-1.712-.721-.38-.385-.095-.962.143-1.154.238-.193.475-.049.808-.145.333-.096.618-.192.76-.48C4.512 1.403 4.287.448 4.16 0c.57.077.935.577 1.046.818V0c.713.337 1.997 1.154 2.425 2.934.342 1.424.586 4.409.665 5.723 1.823.016 4.137-.26 6.229.193 1.901.412 2.757 1.25 3.755 1.25.999 0 1.57-.577 2.282-.096.714.481 1.094 1.828.999 2.838-.076.808-.697 1.074-.998 1.106-.38 1.27 0 2.485.237 2.934v1.827c.111.16.333.655.333 1.347 0 .693-.222 1.154-.333 1.299.19 1.077-.08 2.18-.238 2.597h-1.283c.152-.385.412-.481.523-.481.228-1.193.063-2.293-.048-2.693-.722-.424-1.188-1.17-1.331-1.491.016.272-.029 1.029-.333 1.875-.304.847-.76 1.347-.95 1.491v1.01h-1.284c0-.615.348-.737.523-.721.222-.4.76-1.01.76-2.212 0-1.015-.713-1.492-1.236-2.405-.248-.434-.127-.978-.047-1.203z" fill="url(#lobe-icons-llama-index-fill)"></path><defs><linearGradient gradientUnits="userSpaceOnUse" id="lobe-icons-llama-index-fill" x1="4.021" x2="24.613" y1="2.02" y2="19.277"><stop offset=".062" stop-color="#F6DCD9"></stop><stop offset=".326" stop-color="#FFA5EA"></stop><stop offset=".589" stop-color="#45DFF8"></stop><stop offset="1" stop-color="#BC8DEB"></stop></linearGradient></defs></svg>

After

Width:  |  Height:  |  Size: 1.8 KiB

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 5.5 KiB

View File

@@ -0,0 +1,2 @@
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 16 16" xmlns="http://www.w3.org/2000/svg" fill="none"><path fill="#06D092" d="M8 0L1 4v8l7 4 7-4V4L8 0zm3.119 8.797L9.254 9.863 7.001 8.65v2.549l-2.118 1.33v-5.33l1.68-1.018 2.332 1.216V4.794l2.23-1.322-.006 5.325z"/></svg>

After

Width:  |  Height:  |  Size: 389 B

View File

@@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 128 128">
<path fill="#f5a800" d="M67.648 69.797c-5.246 5.25-5.246 13.758 0 19.008 5.25 5.246 13.758 5.246 19.004 0 5.25-5.25 5.25-13.758 0-19.008-5.246-5.246-13.754-5.246-19.004 0Zm14.207 14.219a6.649 6.649 0 0 1-9.41 0 6.65 6.65 0 0 1 0-9.407 6.649 6.649 0 0 1 9.41 0c2.598 2.586 2.598 6.809 0 9.407ZM86.43 3.672l-8.235 8.234a4.17 4.17 0 0 0 0 5.875l32.149 32.149a4.17 4.17 0 0 0 5.875 0l8.234-8.235c1.61-1.61 1.61-4.261 0-5.87L92.29 3.671a4.159 4.159 0 0 0-5.86 0ZM28.738 108.895a3.763 3.763 0 0 0 0-5.31l-4.183-4.187a3.768 3.768 0 0 0-5.313 0l-8.644 8.649-.016.012-2.371-2.375c-1.313-1.313-3.45-1.313-4.75 0-1.313 1.312-1.313 3.449 0 4.75l14.246 14.242a3.353 3.353 0 0 0 4.746 0c1.3-1.313 1.313-3.45 0-4.746l-2.375-2.375.016-.012Zm0 0"/>
<path fill="#425cc7" d="M72.297 27.313 54.004 45.605c-1.625 1.625-1.625 4.301 0 5.926L65.3 62.824c7.984-5.746 19.18-5.035 26.363 2.153l9.148-9.149c1.622-1.625 1.622-4.297 0-5.922L78.22 27.313a4.185 4.185 0 0 0-5.922 0ZM60.55 67.585l-6.672-6.672c-1.563-1.562-4.125-1.562-5.684 0l-23.53 23.54a4.036 4.036 0 0 0 0 5.687l13.331 13.332a4.036 4.036 0 0 0 5.688 0l15.132-15.157c-3.199-6.609-2.625-14.593 1.735-20.73Zm0 0"/>
</svg>

After

Width:  |  Height:  |  Size: 1.2 KiB

View File

@@ -0,0 +1,99 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="64"
height="64"
viewBox="0 0 64 64"
version="1.1"
id="svg20"
sodipodi:docname="supabase-icon.svg"
style="fill:none"
inkscape:version="0.92.4 (5da689c313, 2019-01-14)">
<metadata
id="metadata24">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<sodipodi:namedview
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1"
objecttolerance="10"
gridtolerance="10"
guidetolerance="10"
inkscape:pageopacity="0"
inkscape:pageshadow="2"
inkscape:window-width="1687"
inkscape:window-height="849"
id="namedview22"
showgrid="false"
inkscape:zoom="2.0884956"
inkscape:cx="54.5"
inkscape:cy="56.5"
inkscape:window-x="70"
inkscape:window-y="0"
inkscape:window-maximized="0"
inkscape:current-layer="svg20" />
<path
d="m 37.41219,62.936701 c -1.634985,2.05896 -4.950068,0.93085 -4.989463,-1.69817 L 31.846665,22.786035 h 25.855406 c 4.683108,0 7.294967,5.409033 4.382927,9.07673 z"
id="path2"
style="fill:url(#paint0_linear);stroke-width:0.57177335"
inkscape:connector-curvature="0" />
<path
d="m 37.41219,62.936701 c -1.634985,2.05896 -4.950068,0.93085 -4.989463,-1.69817 L 31.846665,22.786035 h 25.855406 c 4.683108,0 7.294967,5.409033 4.382927,9.07673 z"
id="path4"
style="fill:url(#paint1_linear);fill-opacity:0.2;stroke-width:0.57177335"
inkscape:connector-curvature="0" />
<path
d="m 26.89694,1.0634102 c 1.634986,-2.05918508 4.950125,-0.93090008 4.989521,1.698149 L 32.138899,41.214003 H 6.607076 c -4.6832501,0 -7.29518376,-5.409032 -4.3830007,-9.07673 z"
id="path6"
inkscape:connector-curvature="0"
style="fill:#3ecf8e;stroke-width:0.57177335" />
<defs
id="defs18">
<linearGradient
id="paint0_linear"
x1="53.973801"
y1="54.973999"
x2="94.163498"
y2="71.829498"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.57177306,0,0,0.57177334,0.98590077,-0.12074988)">
<stop
stop-color="#249361"
id="stop8" />
<stop
offset="1"
stop-color="#3ECF8E"
id="stop10" />
</linearGradient>
<linearGradient
id="paint1_linear"
x1="36.1558"
y1="30.577999"
x2="54.484402"
y2="65.080597"
gradientUnits="userSpaceOnUse"
gradientTransform="matrix(0.57177306,0,0,0.57177334,0.98590077,-0.12074988)">
<stop
id="stop13" />
<stop
offset="1"
stop-opacity="0"
id="stop15" />
</linearGradient>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 3.2 KiB

View File

@@ -27,6 +27,7 @@ import { IUser } from 'providers/App/types';
import { DashboardProvider } from 'providers/Dashboard/Dashboard';
import { ErrorModalProvider } from 'providers/ErrorModalProvider';
import { KBarCommandPaletteProvider } from 'providers/KBarCommandPaletteProvider';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import { QueryBuilderProvider } from 'providers/QueryBuilder';
import { Suspense, useCallback, useEffect, useState } from 'react';
import { Route, Router, Switch } from 'react-router-dom';
@@ -382,20 +383,22 @@ function App(): JSX.Element {
<KeyboardHotkeysProvider>
<AlertRuleProvider>
<AppLayout>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }) => (
<Route
key={`${path}`}
exact={exact}
path={path}
component={component}
/>
))}
<Route exact path="/" component={Home} />
<Route path="*" component={NotFound} />
</Switch>
</Suspense>
<PreferenceContextProvider>
<Suspense fallback={<Spinner size="large" tip="Loading..." />}>
<Switch>
{routes.map(({ path, component, exact }) => (
<Route
key={`${path}`}
exact={exact}
path={path}
component={component}
/>
))}
<Route exact path="/" component={Home} />
<Route path="*" component={NotFound} />
</Switch>
</Suspense>
</PreferenceContextProvider>
</AppLayout>
</AlertRuleProvider>
</KeyboardHotkeysProvider>

View File

@@ -87,6 +87,21 @@
"imgUrl": "/Logos/signoz-brand-logo.svg",
"link": "https://signoz.io/docs/migration/migrate-from-signoz-self-host-to-signoz-cloud/"
},
{
"dataSource": "migrate-from-existing-opentelemetry",
"label": "From Existing OpenTelemetry",
"tags": ["migrate to SigNoz"],
"module": "home",
"relatedSearchKeywords": [
"apm migration",
"opentelemetry",
"migration guide",
"migrate",
"migration"
],
"imgUrl": "/Logos/opentelemetry.svg",
"link": "https://signoz.io/docs/migration/migrate-from-opentelemetry-to-signoz/"
},
{
"dataSource": "java",
"entityID": "dataSource",
@@ -2656,6 +2671,156 @@
],
"link": "https://signoz.io/docs/community/llm-monitoring/"
},
{
"dataSource": "anthropic-api",
"label": "Anthropic API",
"imgUrl": "/Logos/anthropic-api-monitoring.svg",
"tags": ["LLM Monitoring"],
"module": "metrics",
"relatedSearchKeywords": [
"llm monitoring",
"large language model observability",
"monitor anthropic",
"llm response time tracing",
"llm metrics",
"otel llm integration",
"llm performance tracking",
"metrics",
"traces",
"logs"
],
"link": "https://signoz.io/docs/anthropic-monitoring/"
},
{
"dataSource": "claude-code",
"label": "Claude Code",
"imgUrl": "/Logos/claude-code.svg",
"tags": ["LLM Monitoring"],
"module": "metrics",
"relatedSearchKeywords": [
"claude code monitoring",
"claude code observability",
"claude code performance tracking",
"claude code latency tracing",
"claude code metrics",
"otel claude integration",
"claude code response time",
"claude code logs",
"claude code error tracking",
"claude code debugging",
"metrics",
"logs"
],
"link": "https://signoz.io/docs/claude-code-monitoring/"
},
{
"dataSource": "deepseek-api",
"label": "DeepSeek API",
"imgUrl": "/Logos/deepseek.svg",
"tags": ["LLM Monitoring"],
"module": "metrics",
"relatedSearchKeywords": [
"deepseek api monitoring",
"deepseek api observability",
"deepseek api performance tracking",
"deepseek api latency tracing",
"deepseek api metrics",
"otel deepseek integration",
"deepseek api response time",
"deepseek api logs",
"deepseek api error tracking",
"deepseek api debugging",
"metrics",
"logs"
],
"link": "https://signoz.io/docs/deepseek-monitoring/"
},
{
"dataSource": "google-gemini-api",
"label": "Google Gemini",
"imgUrl": "/Logos/google-gemini.svg",
"tags": ["LLM Monitoring"],
"module": "metrics",
"relatedSearchKeywords": [
"google gemini api monitoring",
"google gemini api observability",
"google gemini api performance tracking",
"google gemini api latency tracing",
"google gemini api metrics",
"otel google gemini integration",
"google gemini api response time",
"google gemini api logs",
"google gemini api error tracking",
"google gemini api debugging",
"gemini",
"metrics",
"logs"
],
"link": "https://signoz.io/docs/google-gemini-monitoring/"
},
{
"dataSource": "langchain",
"label": "LangChain",
"imgUrl": "/Logos/langchain.svg",
"tags": ["LLM Monitoring"],
"module": "apm",
"relatedSearchKeywords": [
"langchain monitoring",
"langchain observability",
"langchain performance tracking",
"langchain latency tracing",
"langchain metrics",
"otel langchain integration",
"langchain response time",
"langchain logs",
"langchain error tracking",
"langchain debugging",
"traces"
],
"link": "https://signoz.io/docs/langchain-monitoring/"
},
{
"dataSource": "llamaindex",
"label": "LlamaIndex",
"imgUrl": "/Logos/llamaindex.svg",
"tags": ["LLM Monitoring"],
"module": "apm",
"relatedSearchKeywords": [
"llamaindex monitoring",
"llamaindex observability",
"llamaindex performance tracking",
"llamaindex latency tracing",
"llamaindex metrics",
"otel llamaindex integration",
"llamaindex response time",
"llamaindex logs",
"llamaindex error tracking",
"llamaindex debugging",
"traces"
],
"link": "https://signoz.io/docs/llamaindex-monitoring/"
},
{
"dataSource": "vercel-ai-sdk",
"label": "Vercel AI SDK",
"imgUrl": "/Logos/vercel.svg",
"tags": ["LLM Monitoring"],
"module": "apm",
"relatedSearchKeywords": [
"vercel ai sdk monitoring",
"vercel ai sdk observability",
"vercel ai sdk performance tracking",
"vercel ai sdk latency tracing",
"vercel ai sdk metrics",
"otel vercel ai sdk integration",
"vercel ai sdk response time",
"vercel ai sdk logs",
"vercel ai sdk error tracking",
"vercel ai sdk debugging",
"traces"
],
"link": "https://signoz.io/docs/vercel-ai-sdk-monitoring/"
},
{
"dataSource": "http-endpoints-monitoring",
"label": "HTTP Endpoints Monitoring",
@@ -3391,5 +3556,58 @@
}
]
}
},
{
"dataSource": "microsoft-sql-server",
"label": "Microsoft SQL Server",
"imgUrl": "/Logos/microsoft-sql-server.svg",
"tags": ["integrations"],
"module": "metrics",
"relatedSearchKeywords": [
"sql server metrics",
"mssql monitoring",
"sql server performance",
"sql server observability",
"Microsoft",
"sql server logs",
"metrics",
"logs"
],
"id": "microsoft-sql-server",
"link": "https://signoz.io/docs/integrations/sql-server/"
},
{
"dataSource": "supabase",
"label": "Supabase",
"imgUrl": "/Logos/supabase.svg",
"tags": ["integrations"],
"module": "metrics",
"relatedSearchKeywords": [
"supabase metrics",
"supabase monitoring",
"supabase performance",
"supabase observability",
"supabase",
"metrics"
],
"id": "supabase",
"link": "https://signoz.io/docs/integrations/supabase/"
},
{
"dataSource": "nomad",
"label": "Nomad",
"imgUrl": "/Logos/nomad.svg",
"tags": ["integrations"],
"module": "metrics",
"relatedSearchKeywords": [
"nomad metrics",
"nomad monitoring",
"nomad performance",
"nomad observability",
"nomad",
"metrics"
],
"id": "nomad",
"link": "https://signoz.io/docs/integrations/nomad/"
}
]

View File

@@ -1,3 +1,4 @@
/* eslint-disable sonarjs/cognitive-complexity */
import { getKeySuggestions } from 'api/querySuggestions/getKeySuggestions';
import { TelemetryFieldKey } from 'api/v5/v5';
import { AxiosResponse } from 'axios';
@@ -55,11 +56,10 @@ const useOptionsMenu = ({
initialOptions = {},
}: UseOptionsMenuProps): UseOptionsMenu => {
const { notifications } = useNotifications();
const {
preferences,
updateColumns,
updateFormatting,
} = usePreferenceContext();
const prefCtx = usePreferenceContext();
// TODO: send null to updateColumns and updateFormatting if dataSource is not logs or traces
const slice = dataSource === DataSource.TRACES ? prefCtx.traces : prefCtx.logs;
const { preferences, updateColumns, updateFormatting } = slice;
const [searchText, setSearchText] = useState<string>('');
const [isFocused, setIsFocused] = useState<boolean>(false);

View File

@@ -19,7 +19,6 @@ import { getOperatorValue } from 'container/QueryBuilder/filters/QueryBuilderSea
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import createQueryParams from 'lib/createQueryParams';
import { Compass } from 'lucide-react';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import { useCallback, useMemo } from 'react';
import { Virtuoso } from 'react-virtuoso';
import { ILog } from 'types/api/logs/log';
@@ -212,24 +211,20 @@ function SpanLogs({
const renderContent = useMemo(
() => (
<div className="span-logs-list-container">
<PreferenceContextProvider>
<OverlayScrollbar isVirtuoso>
<Virtuoso
className="span-logs-virtuoso"
key="span-logs-virtuoso"
style={
logs.length <= 35 ? { height: `calc(${logs.length} * 22px)` } : {}
}
data={logs}
totalCount={logs.length}
itemContent={getItemContent}
overscan={200}
components={{
Footer: renderFooter,
}}
/>
</OverlayScrollbar>
</PreferenceContextProvider>
<OverlayScrollbar isVirtuoso>
<Virtuoso
className="span-logs-virtuoso"
key="span-logs-virtuoso"
style={logs.length <= 35 ? { height: `calc(${logs.length} * 22px)` } : {}}
data={logs}
totalCount={logs.length}
itemContent={getItemContent}
overscan={200}
components={{
Footer: renderFooter,
}}
/>
</OverlayScrollbar>
</div>
),
[logs, getItemContent, renderFooter],

View File

@@ -261,18 +261,16 @@ describe('SpanDetailsDrawer', () => {
const logsButton = screen.getByRole('radio', { name: /logs/i });
fireEvent.click(logsButton);
// Wait for logs view to open
// Wait for logs view to open and logs to be displayed
await waitFor(() => {
expect(screen.getByTestId('overlay-scrollbar')).toBeInTheDocument();
});
// Verify logs are displayed
await waitFor(() => {
// eslint-disable-next-line sonarjs/no-duplicate-string
expect(screen.getByTestId('raw-log-span-log-1')).toBeInTheDocument();
// eslint-disable-next-line sonarjs/no-duplicate-string
expect(screen.getByTestId('raw-log-span-log-2')).toBeInTheDocument();
// eslint-disable-next-line sonarjs/no-duplicate-string
expect(screen.getByTestId('raw-log-context-log-before')).toBeInTheDocument();
// eslint-disable-next-line sonarjs/no-duplicate-string
expect(screen.getByTestId('raw-log-context-log-after')).toBeInTheDocument();
});
});
@@ -285,12 +283,9 @@ describe('SpanDetailsDrawer', () => {
fireEvent.click(logsButton);
// Wait for all API calls to complete
await waitFor(
() => {
expect(GetMetricQueryRange).toHaveBeenCalledTimes(3);
},
{ timeout: 5000 },
);
await waitFor(() => {
expect(GetMetricQueryRange).toHaveBeenCalledTimes(3);
});
// Verify the three distinct queries were made
const [spanQuery, beforeQuery, afterQuery] = apiCallHistory;
@@ -319,12 +314,9 @@ describe('SpanDetailsDrawer', () => {
fireEvent.click(logsButton);
// Wait for all API calls to complete
await waitFor(
() => {
expect(GetMetricQueryRange).toHaveBeenCalledTimes(3);
},
{ timeout: 5000 },
);
await waitFor(() => {
expect(GetMetricQueryRange).toHaveBeenCalledTimes(3);
});
const [spanQuery, beforeQuery, afterQuery] = apiCallHistory;
@@ -484,9 +476,17 @@ describe('SpanDetailsDrawer', () => {
const logsButton = screen.getByRole('radio', { name: /logs/i });
fireEvent.click(logsButton);
// Wait for logs to load
// Wait for all API calls to complete first
await waitFor(() => {
expect(GetMetricQueryRange).toHaveBeenCalledTimes(3);
});
// Wait for all logs to be rendered - both span logs and context logs
await waitFor(() => {
expect(screen.getByTestId('raw-log-span-log-1')).toBeInTheDocument();
expect(screen.getByTestId('raw-log-span-log-2')).toBeInTheDocument();
expect(screen.getByTestId('raw-log-context-log-before')).toBeInTheDocument();
expect(screen.getByTestId('raw-log-context-log-after')).toBeInTheDocument();
});
// Verify span logs are highlighted

View File

@@ -6,8 +6,10 @@ import { DataSource } from 'types/common/queryBuilder';
export const useGetAllViews = (
sourcepage: DataSource | 'meter',
enabled?: boolean,
): UseQueryResult<AxiosResponse<AllViewsProps>, AxiosError> =>
useQuery<AxiosResponse<AllViewsProps>, AxiosError>({
queryKey: [{ sourcepage }],
queryFn: () => getAllViews(sourcepage as DataSource),
...(enabled !== undefined ? { enabled } : {}),
});

View File

@@ -8,7 +8,6 @@ import { isDrilldownEnabled } from 'container/QueryTable/Drilldown/drilldownUtil
import { useSafeNavigate } from 'hooks/useSafeNavigate';
import useUrlQuery from 'hooks/useUrlQuery';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import { useEffect, useState } from 'react';
import { generatePath, useLocation, useParams } from 'react-router-dom';
import { Widgets } from 'types/api/dashboard/getAll';
@@ -54,14 +53,12 @@ function DashboardWidget(): JSX.Element | null {
}
return (
<PreferenceContextProvider>
<NewWidget
yAxisUnit={selectedWidget?.yAxisUnit}
selectedGraph={selectedGraph}
fillSpans={selectedWidget?.fillSpans}
enableDrillDown={isDrilldownEnabled()}
/>
</PreferenceContextProvider>
<NewWidget
yAxisUnit={selectedWidget?.yAxisUnit}
selectedGraph={selectedGraph}
fillSpans={selectedWidget?.fillSpans}
enableDrillDown={isDrilldownEnabled()}
/>
);
}

View File

@@ -3,14 +3,9 @@ import ROUTES from 'constants/routes';
import InfraMonitoringHosts from 'container/InfraMonitoringHosts';
import InfraMonitoringK8s from 'container/InfraMonitoringK8s';
import { Inbox } from 'lucide-react';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
export const Hosts: TabRoutes = {
Component: (): JSX.Element => (
<PreferenceContextProvider>
<InfraMonitoringHosts />
</PreferenceContextProvider>
),
Component: (): JSX.Element => <InfraMonitoringHosts />,
name: (
<div className="tab-item">
<Inbox size={16} /> Hosts
@@ -21,11 +16,7 @@ export const Hosts: TabRoutes = {
};
export const Kubernetes: TabRoutes = {
Component: (): JSX.Element => (
<PreferenceContextProvider>
<InfraMonitoringK8s />
</PreferenceContextProvider>
),
Component: (): JSX.Element => <InfraMonitoringK8s />,
name: (
<div className="tab-item">
<Inbox size={16} /> Kubernetes

View File

@@ -3,7 +3,6 @@ import { liveLogsCompositeQuery } from 'container/LiveLogs/constants';
import LiveLogsContainer from 'container/LiveLogs/LiveLogsContainer';
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
import { useShareBuilderUrl } from 'hooks/queryBuilder/useShareBuilderUrl';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import { useEffect } from 'react';
import { DataSource } from 'types/common/queryBuilder';
@@ -15,11 +14,7 @@ function LiveLogs(): JSX.Element {
handleSetConfig(PANEL_TYPES.LIST, DataSource.LOGS);
}, [handleSetConfig]);
return (
<PreferenceContextProvider>
<LiveLogsContainer />
</PreferenceContextProvider>
);
return <LiveLogsContainer />;
}
export default LiveLogs;

View File

@@ -10,7 +10,6 @@ import LogsFilters from 'container/LogsFilters';
import LogsSearchFilter from 'container/LogsSearchFilter';
import LogsTable from 'container/LogsTable';
import history from 'lib/history';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import { useCallback, useMemo } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import { useLocation } from 'react-router-dom';
@@ -83,71 +82,69 @@ function OldLogsExplorer(): JSX.Element {
};
return (
<PreferenceContextProvider>
<div className="old-logs-explorer">
<SpaceContainer
split={<Divider type="vertical" />}
align="center"
direction="horizontal"
>
<LogsSearchFilter />
<LogLiveTail />
</SpaceContainer>
<div className="old-logs-explorer">
<SpaceContainer
split={<Divider type="vertical" />}
align="center"
direction="horizontal"
>
<LogsSearchFilter />
<LogLiveTail />
</SpaceContainer>
<LogsAggregate />
<LogsAggregate />
<Row gutter={20} wrap={false}>
<LogsFilters />
<Col flex={1} className="logs-col-container">
<Row>
<Col flex={1}>
<Space align="baseline" direction="horizontal">
<Select
<Row gutter={20} wrap={false}>
<LogsFilters />
<Col flex={1} className="logs-col-container">
<Row>
<Col flex={1}>
<Space align="baseline" direction="horizontal">
<Select
getPopupContainer={popupContainer}
style={defaultSelectStyle}
value={selectedViewModeOption}
onChange={onChangeVeiwMode}
>
{viewModeOptionList.map((option) => (
<Select.Option key={option.value}>{option.label}</Select.Option>
))}
</Select>
{isFormatButtonVisible && (
<Popover
getPopupContainer={popupContainer}
style={defaultSelectStyle}
value={selectedViewModeOption}
onChange={onChangeVeiwMode}
placement="right"
content={renderPopoverContent}
>
{viewModeOptionList.map((option) => (
<Select.Option key={option.value}>{option.label}</Select.Option>
))}
</Select>
<Button>Format</Button>
</Popover>
)}
{isFormatButtonVisible && (
<Popover
getPopupContainer={popupContainer}
placement="right"
content={renderPopoverContent}
>
<Button>Format</Button>
</Popover>
)}
<Select
getPopupContainer={popupContainer}
style={defaultSelectStyle}
defaultValue={order}
onChange={handleChangeOrder}
>
{orderItems.map((item) => (
<Select.Option key={item.enum}>{item.name}</Select.Option>
))}
</Select>
</Space>
</Col>
<Select
getPopupContainer={popupContainer}
style={defaultSelectStyle}
defaultValue={order}
onChange={handleChangeOrder}
>
{orderItems.map((item) => (
<Select.Option key={item.enum}>{item.name}</Select.Option>
))}
</Select>
</Space>
</Col>
<Col>
<LogControls />
</Col>
</Row>
<Col>
<LogControls />
</Col>
</Row>
<LogsTable viewMode={viewMode} linesPerRow={linesPerRow} />
</Col>
</Row>
<LogsTable viewMode={viewMode} linesPerRow={linesPerRow} />
</Col>
</Row>
<LogDetailedView />
</div>
</PreferenceContextProvider>
<LogDetailedView />
</div>
);
}

View File

@@ -54,7 +54,8 @@ function LogsExplorer(): JSX.Element {
const [selectedView, setSelectedView] = useState<ExplorerViews>(() =>
getExplorerViewFromUrl(searchParams, panelTypesFromUrl),
);
const { preferences, loading: preferencesLoading } = usePreferenceContext();
const { logs } = usePreferenceContext();
const { preferences } = logs;
const [showFilters, setShowFilters] = useState<boolean>(() => {
const localStorageValue = getLocalStorageKey(
@@ -273,7 +274,7 @@ function LogsExplorer(): JSX.Element {
);
useEffect(() => {
if (!preferences || preferencesLoading) {
if (!preferences) {
return;
}
const migratedQuery = migrateOptionsQuery({
@@ -295,12 +296,7 @@ function LogsExplorer(): JSX.Element {
) {
redirectWithOptionsData(migratedQuery);
}
}, [
migrateOptionsQuery,
preferences,
redirectWithOptionsData,
preferencesLoading,
]);
}, [migrateOptionsQuery, preferences, redirectWithOptionsData]);
const toolbarViews = useMemo(
() => ({

View File

@@ -4,14 +4,9 @@ import { Compass, TowerControl, Workflow } from 'lucide-react';
import LogsExplorer from 'pages/LogsExplorer';
import Pipelines from 'pages/Pipelines';
import SaveView from 'pages/SaveView';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
export const logsExplorer: TabRoutes = {
Component: (): JSX.Element => (
<PreferenceContextProvider>
<LogsExplorer />
</PreferenceContextProvider>
),
Component: (): JSX.Element => <LogsExplorer />,
name: (
<div className="tab-item">
<Compass size={16} /> Explorer
@@ -22,11 +17,7 @@ export const logsExplorer: TabRoutes = {
};
export const logsPipelines: TabRoutes = {
Component: (): JSX.Element => (
<PreferenceContextProvider>
<Pipelines />
</PreferenceContextProvider>
),
Component: (): JSX.Element => <Pipelines />,
name: (
<div className="tab-item">
<Workflow size={16} /> Pipelines

View File

@@ -4,14 +4,9 @@ import BreakDownPage from 'container/MeterExplorer/Breakdown/BreakDown';
import ExplorerPage from 'container/MeterExplorer/Explorer';
import { Compass, TowerControl } from 'lucide-react';
import SaveView from 'pages/SaveView';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
export const Explorer: TabRoutes = {
Component: (): JSX.Element => (
<PreferenceContextProvider>
<ExplorerPage />
</PreferenceContextProvider>
),
Component: (): JSX.Element => <ExplorerPage />,
name: (
<div className="tab-item">
<Compass size={16} /> Explorer

View File

@@ -4,7 +4,6 @@ import ExplorerPage from 'container/MetricsExplorer/Explorer';
import SummaryPage from 'container/MetricsExplorer/Summary';
import { BarChart2, Compass, TowerControl } from 'lucide-react';
import SaveView from 'pages/SaveView';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
export const Summary: TabRoutes = {
Component: SummaryPage,
@@ -18,11 +17,7 @@ export const Summary: TabRoutes = {
};
export const Explorer: TabRoutes = {
Component: (): JSX.Element => (
<PreferenceContextProvider>
<ExplorerPage />
</PreferenceContextProvider>
),
Component: (): JSX.Element => <ExplorerPage />,
name: (
<div className="tab-item">
<Compass size={16} /> Explorer

View File

@@ -4,7 +4,6 @@ import NotFound from 'components/NotFound';
import Spinner from 'components/Spinner';
import NewDashboard from 'container/NewDashboard';
import { useDashboard } from 'providers/Dashboard/Dashboard';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import { useEffect } from 'react';
import { ErrorType } from 'types/common';
@@ -36,11 +35,7 @@ function DashboardPage(): JSX.Element {
return <Spinner tip="Loading.." />;
}
return (
<PreferenceContextProvider>
<NewDashboard />
</PreferenceContextProvider>
);
return <NewDashboard />;
}
export default DashboardPage;

View File

@@ -5,15 +5,10 @@ import SaveView from 'pages/SaveView';
import TracesExplorer from 'pages/TracesExplorer';
import TracesFunnelDetails from 'pages/TracesFunnelDetails';
import TracesFunnels from 'pages/TracesFunnels';
import { PreferenceContextProvider } from 'providers/preferences/context/PreferenceContextProvider';
import { matchPath } from 'react-router-dom';
export const tracesExplorer: TabRoutes = {
Component: (): JSX.Element => (
<PreferenceContextProvider>
<TracesExplorer />
</PreferenceContextProvider>
),
Component: (): JSX.Element => <TracesExplorer />,
name: (
<div className="tab-item">
<Compass size={16} /> Explorer

View File

@@ -1,154 +0,0 @@
/* eslint-disable sonarjs/no-identical-functions */
import { render, screen } from '@testing-library/react';
import { TelemetryFieldKey } from 'api/v5/v5';
import {
FormattingOptions,
PreferenceMode,
Preferences,
} from 'providers/preferences/types';
import { MemoryRouter, Route, Switch } from 'react-router-dom';
import {
PreferenceContextProvider,
usePreferenceContext,
} from '../context/PreferenceContextProvider';
// Mock the usePreferenceSync hook
jest.mock('../sync/usePreferenceSync', () => ({
usePreferenceSync: jest.fn().mockReturnValue({
preferences: {
columns: [] as TelemetryFieldKey[],
formatting: {
maxLines: 2,
format: 'table',
fontSize: 'small',
version: 1,
} as FormattingOptions,
} as Preferences,
loading: false,
error: null,
updateColumns: jest.fn(),
updateFormatting: jest.fn(),
}),
}));
// Test component that consumes the context
function TestConsumer(): JSX.Element {
const context = usePreferenceContext();
return (
<div>
<div data-testid="mode">{context.mode}</div>
<div data-testid="dataSource">{context.dataSource}</div>
<div data-testid="loading">{String(context.loading)}</div>
<div data-testid="error">{String(context.error)}</div>
<div data-testid="savedViewId">{context.savedViewId || 'no-view-id'}</div>
</div>
);
}
describe('PreferenceContextProvider', () => {
it('should provide context with direct mode when no viewKey is present', () => {
render(
<MemoryRouter initialEntries={['/logs']}>
<Switch>
<Route
path="/logs"
component={(): JSX.Element => (
<PreferenceContextProvider>
<TestConsumer />
</PreferenceContextProvider>
)}
/>
</Switch>
</MemoryRouter>,
);
expect(screen.getByTestId('mode')).toHaveTextContent(PreferenceMode.DIRECT);
expect(screen.getByTestId('dataSource')).toHaveTextContent('logs');
expect(screen.getByTestId('loading')).toHaveTextContent('false');
expect(screen.getByTestId('error')).toHaveTextContent('null');
expect(screen.getByTestId('savedViewId')).toHaveTextContent('no-view-id');
});
it('should provide context with savedView mode when viewKey is present', () => {
render(
<MemoryRouter initialEntries={['/logs?viewKey="test-view-id"']}>
<Switch>
<Route
path="/logs"
component={(): JSX.Element => (
<PreferenceContextProvider>
<TestConsumer />
</PreferenceContextProvider>
)}
/>
</Switch>
</MemoryRouter>,
);
expect(screen.getByTestId('mode')).toHaveTextContent('savedView');
expect(screen.getByTestId('dataSource')).toHaveTextContent('logs');
expect(screen.getByTestId('savedViewId')).toHaveTextContent('test-view-id');
});
it('should set traces dataSource when pathname includes traces', () => {
render(
<MemoryRouter initialEntries={['/traces']}>
<Switch>
<Route
path="/traces"
component={(): JSX.Element => (
<PreferenceContextProvider>
<TestConsumer />
</PreferenceContextProvider>
)}
/>
</Switch>
</MemoryRouter>,
);
expect(screen.getByTestId('dataSource')).toHaveTextContent('traces');
});
it('should handle invalid viewKey JSON gracefully', () => {
// Mock console.error to avoid test output clutter
const originalConsoleError = console.error;
console.error = jest.fn();
render(
<MemoryRouter initialEntries={['/logs?viewKey=invalid-json']}>
<Switch>
<Route
path="/logs"
component={(): JSX.Element => (
<PreferenceContextProvider>
<TestConsumer />
</PreferenceContextProvider>
)}
/>
</Switch>
</MemoryRouter>,
);
expect(screen.getByTestId('mode')).toHaveTextContent(PreferenceMode.DIRECT);
expect(console.error).toHaveBeenCalled();
// Restore console.error
console.error = originalConsoleError;
});
it('should throw error when usePreferenceContext is used outside provider', () => {
// Suppress the error output for this test
const originalConsoleError = console.error;
console.error = jest.fn();
expect(() => {
render(<TestConsumer />);
}).toThrow(
'usePreferenceContext must be used within PreferenceContextProvider',
);
// Restore console.error
console.error = originalConsoleError;
});
});

View File

@@ -0,0 +1,402 @@
/* eslint-disable sonarjs/no-duplicate-string */
import { TelemetryFieldKey } from 'api/v5/v5';
import { LOCALSTORAGE } from 'constants/localStorage';
import { LogViewMode } from 'container/LogsTable';
import {
defaultLogsSelectedColumns,
defaultTraceSelectedColumns,
} from 'container/OptionsMenu/constants';
import { FontSize } from 'container/OptionsMenu/types';
import { render, screen, userEvent } from 'tests/test-utils';
import { DataSource } from 'types/common/queryBuilder';
import { usePreferenceContext } from '../context/PreferenceContextProvider';
const ROUTE_LOGS = '/logs';
const ROUTE_TRACES = '/traces';
const TESTID_LOGS = 'logs';
const TESTID_TRACES = 'traces';
type LogsLocalOptions = {
selectColumns?: TelemetryFieldKey[];
maxLines?: number;
format?: string;
fontSize?: string;
version?: number;
};
type TracesLocalOptions = {
selectColumns?: TelemetryFieldKey[];
};
function setLocalStorageJSON(key: string, value: unknown): void {
localStorage.setItem(key, JSON.stringify(value));
}
function getLocalStorageJSON<T>(key: string): T | null {
const raw = localStorage.getItem(key);
return raw ? (JSON.parse(raw) as T) : null;
}
// Test consumer that renders the loading flag and column count of the
// preference slice matching `dataSource`, plus a button that replaces the
// slice's columns with the first default column for that source.
function Consumer({
  dataSource,
  testIdPrefix,
}: {
  dataSource: DataSource;
  testIdPrefix: string;
}): JSX.Element {
  const preferenceCtx = usePreferenceContext();
  const isTraces = dataSource === DataSource.TRACES;
  const slice = isTraces ? preferenceCtx.traces : preferenceCtx.logs;

  // Pick the first default column for the active source and push it as the
  // new column selection.
  const handleUpdateColumns = (): void => {
    const defaults = isTraces
      ? defaultTraceSelectedColumns
      : defaultLogsSelectedColumns;
    const newCols = defaults.slice(0, 1) as TelemetryFieldKey[];
    slice.updateColumns(newCols);
  };

  return (
    <div>
      <div data-testid={`${testIdPrefix}-loading`}>{String(slice.loading)}</div>
      <div data-testid={`${testIdPrefix}-columns-len`}>
        {String(slice.preferences?.columns?.length || 0)}
      </div>
      <button
        data-testid={`${testIdPrefix}-update-columns`}
        type="button"
        onClick={handleUpdateColumns}
      >
        update
      </button>
    </div>
  );
}
describe('PreferencesProvider integration', () => {
beforeEach(() => {
localStorage.clear();
});
// Logs preference slice: defaults, localStorage precedence, URL options, and
// persistence behavior in direct vs saved-view mode.
describe('Logs', () => {
  it('loads defaults when no localStorage or url provided', () => {
    render(
      <Consumer dataSource={DataSource.LOGS} testIdPrefix={TESTID_LOGS} />,
      undefined,
      {
        initialRoute: ROUTE_LOGS,
      },
    );
    // Loading is already resolved and the default column set is non-empty.
    expect(screen.getByTestId('logs-loading')).toHaveTextContent('false');
    expect(
      Number(screen.getByTestId('logs-columns-len').textContent),
    ).toBeGreaterThan(0);
  });
  it('respects localStorage when present', () => {
    // Seed a stored preference with exactly one column so it overrides
    // the defaults.
    setLocalStorageJSON(LOCALSTORAGE.LOGS_LIST_OPTIONS, {
      selectColumns: [{ name: 'ls.col' }],
      maxLines: 5,
      format: 'json',
      fontSize: 'large',
      version: 2,
    });
    render(
      <Consumer dataSource={DataSource.LOGS} testIdPrefix={TESTID_LOGS} />,
      undefined,
      {
        initialRoute: ROUTE_LOGS,
      },
    );
    expect(Number(screen.getByTestId('logs-columns-len').textContent)).toBe(1);
  });
  it('direct mode updateColumns persists to localStorage', async () => {
    const user = userEvent.setup({ pointerEventsCheck: 0 });
    render(
      <Consumer dataSource={DataSource.LOGS} testIdPrefix={TESTID_LOGS} />,
      undefined,
      {
        initialRoute: ROUTE_LOGS,
      },
    );
    await user.click(screen.getByTestId('logs-update-columns'));
    // In direct mode the new selection is written through to localStorage.
    const stored = getLocalStorageJSON<LogsLocalOptions>(
      LOCALSTORAGE.LOGS_LIST_OPTIONS,
    );
    expect(stored?.selectColumns).toEqual([
      defaultLogsSelectedColumns[0] as TelemetryFieldKey,
    ]);
  });
  it('saved view mode uses in-memory preferences (no localStorage write)', async () => {
    // A viewKey query param switches the provider into saved-view mode.
    const viewKey = JSON.stringify('saved-view-id-1');
    const initialEntry = `/logs?viewKey=${encodeURIComponent(viewKey)}`;
    const user = userEvent.setup({ pointerEventsCheck: 0 });
    render(
      <Consumer dataSource={DataSource.LOGS} testIdPrefix={TESTID_LOGS} />,
      undefined,
      {
        initialRoute: initialEntry,
      },
    );
    await user.click(screen.getByTestId('logs-update-columns'));
    const stored = getLocalStorageJSON<LogsLocalOptions>(
      LOCALSTORAGE.LOGS_LIST_OPTIONS,
    );
    // Saved-view updates must not leak into localStorage.
    expect(stored?.selectColumns).toBeUndefined();
  });
  it('url options override defaults', () => {
    const options = {
      selectColumns: [{ name: 'url.col' }],
      maxLines: 7,
      format: 'json',
      fontSize: 'large',
      version: 2,
    };
    // Stub window.location so the provider reads options from the URL.
    const originalLocation = window.location;
    Object.defineProperty(window, 'location', {
      writable: true,
      value: {
        ...originalLocation,
        search: `?options=${encodeURIComponent(JSON.stringify(options))}`,
      },
    });
    render(
      <Consumer dataSource={DataSource.LOGS} testIdPrefix={TESTID_LOGS} />,
      undefined,
      {
        initialRoute: ROUTE_LOGS,
      },
    );
    // restore
    Object.defineProperty(window, 'location', {
      writable: true,
      value: originalLocation,
    });
    expect(Number(screen.getByTestId('logs-columns-len').textContent)).toBe(1);
  });
  it('updateFormatting persists to localStorage in direct mode', async () => {
    const user = userEvent.setup({ pointerEventsCheck: 0 });
    // Minimal consumer that only exercises the formatting updater.
    function FormattingConsumer(): JSX.Element {
      const { logs } = usePreferenceContext();
      return (
        <button
          data-testid="logs-update-formatting"
          type="button"
          onClick={(): void =>
            logs.updateFormatting({
              maxLines: 9,
              format: 'json' as LogViewMode,
              fontSize: 'large' as FontSize,
              version: 2,
            })
          }
        >
          fmt
        </button>
      );
    }
    render(<FormattingConsumer />, undefined, { initialRoute: ROUTE_LOGS });
    await user.click(screen.getByTestId('logs-update-formatting'));
    const stored = getLocalStorageJSON<LogsLocalOptions>(
      LOCALSTORAGE.LOGS_LIST_OPTIONS,
    );
    expect(stored?.maxLines).toBe(9);
    expect(stored?.format).toBe('json');
    expect(stored?.fontSize).toBe('large');
    expect(stored?.version).toBe(2);
  });
  it('saved view mode updates in-memory preferences (columns-len changes)', async () => {
    const user = userEvent.setup({ pointerEventsCheck: 0 });
    const viewKey = JSON.stringify('saved-view-id-3');
    const initialEntry = `/logs?viewKey=${encodeURIComponent(viewKey)}`;
    render(
      <Consumer dataSource={DataSource.LOGS} testIdPrefix={TESTID_LOGS} />,
      undefined,
      { initialRoute: initialEntry },
    );
    const before = Number(screen.getByTestId('logs-columns-len').textContent);
    await user.click(screen.getByTestId('logs-update-columns'));
    const after = Number(screen.getByTestId('logs-columns-len').textContent);
    expect(after).toBeGreaterThanOrEqual(1);
    // Should change from default to 1 for our new selection; tolerate default already being >=1
    if (before !== after) {
      expect(after).toBe(1);
    }
  });
});
describe('Traces', () => {
it('loads defaults when no localStorage or url provided', () => {
render(
<Consumer dataSource={DataSource.TRACES} testIdPrefix={TESTID_TRACES} />,
undefined,
{
initialRoute: ROUTE_TRACES,
},
);
expect(screen.getByTestId('traces-loading')).toHaveTextContent('false');
expect(
Number(screen.getByTestId('traces-columns-len').textContent),
).toBeGreaterThan(0);
});
it('respects localStorage when present', () => {
setLocalStorageJSON(LOCALSTORAGE.TRACES_LIST_OPTIONS, {
selectColumns: [{ name: 'trace.ls.col' }],
});
render(
<Consumer dataSource={DataSource.TRACES} testIdPrefix={TESTID_TRACES} />,
undefined,
{
initialRoute: ROUTE_TRACES,
},
);
expect(Number(screen.getByTestId('traces-columns-len').textContent)).toBe(1);
});
it('direct mode updateColumns persists to localStorage', async () => {
const user = userEvent.setup({ pointerEventsCheck: 0 });
render(
<Consumer dataSource={DataSource.TRACES} testIdPrefix={TESTID_TRACES} />,
undefined,
{
initialRoute: ROUTE_TRACES,
},
);
await user.click(screen.getByTestId('traces-update-columns'));
const stored = getLocalStorageJSON<TracesLocalOptions>(
LOCALSTORAGE.TRACES_LIST_OPTIONS,
);
expect(stored?.selectColumns).toEqual([
defaultTraceSelectedColumns[0] as TelemetryFieldKey,
]);
});
it('saved view mode uses in-memory preferences (no localStorage write)', async () => {
const viewKey = JSON.stringify('saved-view-id-2');
const initialEntry = `/traces?viewKey=${encodeURIComponent(viewKey)}`;
const user = userEvent.setup({ pointerEventsCheck: 0 });
render(
<Consumer dataSource={DataSource.TRACES} testIdPrefix="traces" />,
undefined,
{
initialRoute: initialEntry,
},
);
await user.click(screen.getByTestId('traces-update-columns'));
const stored = getLocalStorageJSON<TracesLocalOptions>(
LOCALSTORAGE.TRACES_LIST_OPTIONS,
);
expect(stored?.selectColumns).toBeUndefined();
});
it('url options override defaults', () => {
const options = {
selectColumns: [{ name: 'trace.url.col' }],
};
const originalLocation = window.location;
Object.defineProperty(window, 'location', {
writable: true,
value: {
...originalLocation,
search: `?options=${encodeURIComponent(JSON.stringify(options))}`,
},
});
render(
<Consumer dataSource={DataSource.TRACES} testIdPrefix={TESTID_TRACES} />,
undefined,
{ initialRoute: ROUTE_TRACES },
);
Object.defineProperty(window, 'location', {
writable: true,
value: originalLocation,
});
expect(Number(screen.getByTestId('traces-columns-len').textContent)).toBe(1);
});
it('updateFormatting is a no-op in direct mode (no localStorage write)', async () => {
const user = userEvent.setup({ pointerEventsCheck: 0 });
function TracesFormattingConsumer(): JSX.Element {
const { traces } = usePreferenceContext();
return (
<button
data-testid="traces-update-formatting"
type="button"
onClick={(): void =>
traces.updateFormatting({
maxLines: 9,
format: 'json' as LogViewMode,
fontSize: 'large' as FontSize,
version: 2,
})
}
>
fmt
</button>
);
}
render(<TracesFormattingConsumer />, undefined, { initialRoute: '/traces' });
await user.click(screen.getByTestId('traces-update-formatting'));
const stored = getLocalStorageJSON<TracesLocalOptions>(
LOCALSTORAGE.TRACES_LIST_OPTIONS,
);
expect(stored).toBeNull();
});
it('saved view mode updates in-memory preferences (columns-len changes)', async () => {
const user = userEvent.setup({ pointerEventsCheck: 0 });
const viewKey = JSON.stringify('saved-view-id-4');
const initialEntry = `/traces?viewKey=${encodeURIComponent(viewKey)}`;
render(
<Consumer dataSource={DataSource.TRACES} testIdPrefix={TESTID_TRACES} />,
undefined,
{ initialRoute: initialEntry },
);
const before = Number(screen.getByTestId('traces-columns-len').textContent);
await user.click(screen.getByTestId('traces-update-columns'));
const after = Number(screen.getByTestId('traces-columns-len').textContent);
expect(after).toBeGreaterThanOrEqual(1);
if (before !== after) {
expect(after).toBe(1);
}
});
});
});

View File

@@ -11,18 +11,18 @@ jest.mock('../configs/logsLoaderConfig', () => ({
__esModule: true,
default: {
priority: ['local', 'url', 'default'],
local: jest.fn().mockResolvedValue({
local: jest.fn(() => ({
columns: [{ name: 'local-column' }],
formatting: { maxLines: 5, format: 'table', fontSize: 'medium', version: 1 },
}),
url: jest.fn().mockResolvedValue({
})),
url: jest.fn(() => ({
columns: [{ name: 'url-column' }],
formatting: { maxLines: 3, format: 'table', fontSize: 'small', version: 1 },
}),
default: jest.fn().mockResolvedValue({
})),
default: jest.fn(() => ({
columns: [{ name: 'default-column' }],
formatting: { maxLines: 2, format: 'table', fontSize: 'small', version: 1 },
}),
})),
},
}));
@@ -30,15 +30,15 @@ jest.mock('../configs/tracesLoaderConfig', () => ({
__esModule: true,
default: {
priority: ['local', 'url', 'default'],
local: jest.fn().mockResolvedValue({
local: jest.fn(() => ({
columns: [{ name: 'local-trace-column' }],
}),
url: jest.fn().mockResolvedValue({
})),
url: jest.fn(() => ({
columns: [{ name: 'url-trace-column' }],
}),
default: jest.fn().mockResolvedValue({
})),
default: jest.fn(() => ({
columns: [{ name: 'default-trace-column' }],
}),
})),
},
}));
@@ -57,11 +57,6 @@ describe('usePreferenceLoader', () => {
}),
);
// Initially it should be loading
expect(result.current.loading).toBe(true);
expect(result.current.preferences).toBe(null);
expect(result.current.error).toBe(null);
// Wait for the loader to complete
await waitFor(() => {
expect(result.current.loading).toBe(false);
@@ -123,30 +118,33 @@ describe('usePreferenceLoader', () => {
});
it('should handle errors during loading', async () => {
// Mock an error in the loader using jest.spyOn
const localSpy = jest.spyOn(logsLoaderConfig, 'local');
localSpy.mockRejectedValueOnce(new Error('Loading failed'));
// Make first call succeed (initial state), second call throw in reSync effect
const localSpy: jest.SpyInstance = jest.spyOn(logsLoaderConfig, 'local');
localSpy.mockImplementationOnce(() => ({
columns: [{ name: 'local-column' }],
formatting: { maxLines: 5, format: 'table', fontSize: 'medium', version: 1 },
}));
localSpy.mockImplementationOnce(() => {
throw new Error('Loading failed');
});
const setReSync = jest.fn();
const { result } = renderHook(() =>
usePreferenceLoader({
dataSource: DataSource.LOGS,
reSync: false,
reSync: true,
setReSync,
}),
);
// Wait for the loader to complete
await waitFor(() => {
expect(result.current.loading).toBe(false);
expect(result.current.error).toBeInstanceOf(Error);
expect(result.current.error?.message).toBe('Loading failed');
});
// Should have set the error
expect(result.current.error).toBeInstanceOf(Error);
expect(result.current.error?.message).toBe('Loading failed');
expect(result.current.preferences).toBe(null);
// Reset reSync should be called
expect(setReSync).toHaveBeenCalledWith(false);
// Restore original implementation
localSpy.mockRestore();
});
});

View File

@@ -10,10 +10,10 @@ import { FormattingOptions } from '../types';
// --- LOGS preferences loader config ---
const logsLoaders = {
local: async (): Promise<{
local: (): {
columns: BaseAutocompleteData[];
formatting: FormattingOptions;
}> => {
} => {
const local = getLocalStorageKey(LOCALSTORAGE.LOGS_LIST_OPTIONS);
if (local) {
try {
@@ -31,10 +31,10 @@ const logsLoaders = {
}
return { columns: [], formatting: undefined } as any;
},
url: async (): Promise<{
url: (): {
columns: BaseAutocompleteData[];
formatting: FormattingOptions;
}> => {
} => {
const urlParams = new URLSearchParams(window.location.search);
try {
const options = JSON.parse(urlParams.get('options') || '{}');
@@ -50,10 +50,10 @@ const logsLoaders = {
} catch {}
return { columns: [], formatting: undefined } as any;
},
default: async (): Promise<{
default: (): {
columns: TelemetryFieldKey[];
formatting: FormattingOptions;
}> => ({
} => ({
columns: defaultLogsSelectedColumns,
formatting: {
maxLines: 2,

View File

@@ -7,9 +7,9 @@ import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteRe
// --- TRACES preferences loader config ---
const tracesLoaders = {
local: async (): Promise<{
local: (): {
columns: BaseAutocompleteData[];
}> => {
} => {
const local = getLocalStorageKey(LOCALSTORAGE.TRACES_LIST_OPTIONS);
if (local) {
try {
@@ -21,9 +21,9 @@ const tracesLoaders = {
}
return { columns: [] };
},
url: async (): Promise<{
url: (): {
columns: BaseAutocompleteData[];
}> => {
} => {
const urlParams = new URLSearchParams(window.location.search);
try {
const options = JSON.parse(urlParams.get('options') || '{}');
@@ -33,9 +33,9 @@ const tracesLoaders = {
} catch {}
return { columns: [] };
},
default: async (): Promise<{
default: (): {
columns: TelemetryFieldKey[];
}> => ({
} => ({
columns: defaultTraceSelectedColumns,
}),
priority: ['local', 'url', 'default'] as const,

View File

@@ -4,7 +4,6 @@ import {
PreferenceMode,
} from 'providers/preferences/types';
import React, { createContext, useContext, useMemo } from 'react';
import { useLocation } from 'react-router-dom';
import { DataSource } from 'types/common/queryBuilder';
import { usePreferenceSync } from '../sync/usePreferenceSync';
@@ -18,7 +17,6 @@ export function PreferenceContextProvider({
}: {
children: React.ReactNode;
}): JSX.Element {
const location = useLocation();
const params = useUrlQuery();
let savedViewId = '';
@@ -30,41 +28,25 @@ export function PreferenceContextProvider({
console.error(e);
}
}
let dataSource: DataSource = DataSource.LOGS;
if (location.pathname.includes('traces')) dataSource = DataSource.TRACES;
const {
preferences,
loading,
error,
updateColumns,
updateFormatting,
} = usePreferenceSync({
const logsSlice = usePreferenceSync({
mode: savedViewId ? PreferenceMode.SAVED_VIEW : PreferenceMode.DIRECT,
savedViewId: savedViewId || undefined,
dataSource,
dataSource: DataSource.LOGS,
});
const tracesSlice = usePreferenceSync({
mode: savedViewId ? PreferenceMode.SAVED_VIEW : PreferenceMode.DIRECT,
savedViewId: savedViewId || undefined,
dataSource: DataSource.TRACES,
});
const value = useMemo<PreferenceContextValue>(
() => ({
preferences,
loading,
error,
mode: savedViewId ? PreferenceMode.SAVED_VIEW : PreferenceMode.DIRECT,
savedViewId: savedViewId || undefined,
dataSource,
updateColumns,
updateFormatting,
logs: logsSlice,
traces: tracesSlice,
}),
[
savedViewId,
dataSource,
preferences,
loading,
error,
updateColumns,
updateFormatting,
],
[logsSlice, tracesSlice],
);
return (

View File

@@ -17,55 +17,48 @@ const migrateColumns = (columns: any): any =>
return column;
});
// Generic preferences loader that works with any config
async function preferencesLoader<T>(config: {
// Generic preferences loader that works with any config (synchronous version)
function preferencesLoader<T>(config: {
priority: readonly string[];
[key: string]: any;
}): Promise<T> {
const findValidLoader = async (): Promise<T> => {
// Try each loader in priority order
const results = await Promise.all(
config.priority.map(async (source) => ({
source,
result: await config[source](),
})),
);
}): T {
// Try each loader in priority order synchronously
const results = config.priority.map((source: string) => ({
source,
result: config[source](),
}));
// Find valid columns and formatting independently
const validColumnsResult = results.find(
({ result }) => result.columns?.length,
);
// Find valid columns and formatting independently
const validColumnsResult = results.find(
({ result }) => result.columns?.length,
);
const validFormattingResult = results.find(({ result }) => result.formatting);
const validFormattingResult = results.find(({ result }) => result.formatting);
const migratedColumns = validColumnsResult?.result.columns
? migrateColumns(validColumnsResult.result.columns)
: undefined;
const migratedColumns = validColumnsResult?.result.columns
? migrateColumns(validColumnsResult?.result.columns)
: undefined;
// Combine valid results or fallback to default
const finalResult = {
columns: migratedColumns || config.default().columns,
formatting:
validFormattingResult?.result.formatting || config.default().formatting,
};
return finalResult as T;
// Combine valid results or fallback to default
const finalResult = {
columns: migratedColumns || config.default().columns,
formatting:
validFormattingResult?.result.formatting || config.default().formatting,
};
return findValidLoader();
return finalResult as T;
}
// Use the generic loader with specific configs
async function logsPreferencesLoader(): Promise<{
function logsPreferencesLoader(): {
columns: TelemetryFieldKey[];
formatting: FormattingOptions;
}> {
} {
return preferencesLoader(logsLoaderConfig);
}
async function tracesPreferencesLoader(): Promise<{
function tracesPreferencesLoader(): {
columns: TelemetryFieldKey[];
}> {
} {
return preferencesLoader(tracesLoaderConfig);
}
@@ -82,29 +75,36 @@ export function usePreferenceLoader({
loading: boolean;
error: Error | null;
} {
const [preferences, setPreferences] = useState<Preferences | null>(null);
const [loading, setLoading] = useState(true);
const [preferences, setPreferences] = useState<Preferences | null>(() => {
if (dataSource === DataSource.LOGS) {
const { columns, formatting } = logsPreferencesLoader();
return { columns, formatting };
}
if (dataSource === DataSource.TRACES) {
const { columns } = tracesPreferencesLoader();
return { columns };
}
return null;
});
const [error, setError] = useState<Error | null>(null);
useEffect((): void => {
async function loadPreferences(): Promise<void> {
setLoading(true);
function loadPreferences(): void {
setError(null);
try {
if (dataSource === DataSource.LOGS) {
const { columns, formatting } = await logsPreferencesLoader();
const { columns, formatting } = logsPreferencesLoader();
setPreferences({ columns, formatting });
}
if (dataSource === DataSource.TRACES) {
const { columns } = await tracesPreferencesLoader();
const { columns } = tracesPreferencesLoader();
setPreferences({ columns });
}
} catch (e) {
setError(e as Error);
} finally {
setLoading(false);
// Reset reSync back to false after loading is complete
if (reSync) {
setReSync(false);
@@ -113,10 +113,10 @@ export function usePreferenceLoader({
}
// Only load preferences on initial mount or when reSync is true
if (loading || reSync) {
if (reSync) {
loadPreferences();
}
}, [dataSource, reSync, setReSync, loading]);
}, [dataSource, reSync, setReSync]);
return { preferences, loading, error };
return { preferences, loading: false, error };
}

View File

@@ -25,7 +25,10 @@ export function usePreferenceSync({
updateColumns: (newColumns: TelemetryFieldKey[]) => void;
updateFormatting: (newFormatting: FormattingOptions) => void;
} {
const { data: viewsData } = useGetAllViews(dataSource);
const { data: viewsData } = useGetAllViews(
dataSource,
mode === PreferenceMode.SAVED_VIEW,
);
const [
savedViewPreferences,

View File

@@ -1,24 +1,25 @@
import { TelemetryFieldKey } from 'api/v5/v5';
import { LogViewMode } from 'container/LogsTable';
import { FontSize } from 'container/OptionsMenu/types';
import { DataSource } from 'types/common/queryBuilder';
export enum PreferenceMode {
SAVED_VIEW = 'savedView',
DIRECT = 'direct',
}
export interface PreferenceContextValue {
export interface PreferenceSlice {
preferences: Preferences | null;
loading: boolean;
error: Error | null;
mode: PreferenceMode;
savedViewId?: string;
dataSource: DataSource;
updateColumns: (newColumns: TelemetryFieldKey[]) => void;
updateFormatting: (newFormatting: FormattingOptions) => void;
}
export interface PreferenceContextValue {
logs: PreferenceSlice;
traces: PreferenceSlice;
}
export interface FormattingOptions {
maxLines?: number;
format?: LogViewMode;

View File

@@ -1,11 +0,0 @@
# Copy this to .env and fill in your values
# Base URL for the application
SIGNOZ_E2E_BASE_URL=https://app.us.staging.signoz.cloud
# Test credentials
SIGNOZ_E2E_USERNAME=
SIGNOZ_E2E_PASSWORD=
# API endpoint (if needed)
SIGNOZ_E2E_API_URL=https://api.us.staging.signoz.cloud

View File

@@ -1,92 +0,0 @@
---
description: Use this agent when you need to create comprehensive test plan for a web application or website.
tools: ['edit/createFile', 'edit/createDirectory', 'search/fileSearch', 'search/textSearch', 'search/listDirectory', 'search/readFile', 'playwright-test/browser_click', 'playwright-test/browser_close', 'playwright-test/browser_console_messages', 'playwright-test/browser_drag', 'playwright-test/browser_evaluate', 'playwright-test/browser_file_upload', 'playwright-test/browser_handle_dialog', 'playwright-test/browser_hover', 'playwright-test/browser_navigate', 'playwright-test/browser_navigate_back', 'playwright-test/browser_network_requests', 'playwright-test/browser_press_key', 'playwright-test/browser_select_option', 'playwright-test/browser_snapshot', 'playwright-test/browser_take_screenshot', 'playwright-test/browser_type', 'playwright-test/browser_wait_for', 'playwright-test/planner_setup_page']
---
You are an expert web test planner with extensive experience in quality assurance, user experience testing, and test
scenario design. Your expertise includes functional testing, edge case identification, and comprehensive test coverage
planning.
You will:
1. **Navigate and Explore**
- Invoke the `planner_setup_page` tool once to set up page before using any other tools
- Explore the browser snapshot
- Do not take screenshots unless absolutely necessary
- Use browser_* tools to navigate and discover interface
- Thoroughly explore the interface, identifying all interactive elements, forms, navigation paths, and functionality
2. **Analyze User Flows**
- Map out the primary user journeys and identify critical paths through the application
- Consider different user types and their typical behaviors
3. **Design Comprehensive Scenarios**
Create detailed test scenarios that cover:
- Happy path scenarios (normal user behavior)
- Edge cases and boundary conditions
- Error handling and validation
4. **Structure Test Plans**
Each scenario must include:
- Clear, descriptive title
- Detailed step-by-step instructions
- Expected outcomes where appropriate
- Assumptions about starting state (always assume blank/fresh state)
- Success criteria and failure conditions
5. **Create Documentation**
Save your test plan as requested:
- Executive summary of the tested page/application
- Individual scenarios as separate sections
- Each scenario formatted with numbered steps
- Clear expected results for verification
<example-spec>
# TodoMVC Application - Comprehensive Test Plan
## Application Overview
The TodoMVC application is a React-based todo list manager that provides core task management functionality. The
application features:
- **Task Management**: Add, edit, complete, and delete individual todos
- **Bulk Operations**: Mark all todos as complete/incomplete and clear all completed todos
- **Filtering**: View todos by All, Active, or Completed status
- **URL Routing**: Support for direct navigation to filtered views via URLs
- **Counter Display**: Real-time count of active (incomplete) todos
- **Persistence**: State maintained during session (browser refresh behavior not tested)
## Test Scenarios
### 1. Adding New Todos
**Seed:** `tests/seed.spec.ts`
#### 1.1 Add Valid Todo
**Steps:**
1. Click in the "What needs to be done?" input field
2. Type "Buy groceries"
3. Press Enter key
**Expected Results:**
- Todo appears in the list with unchecked checkbox
- Counter shows "1 item left"
- Input field is cleared and ready for next entry
- Todo list controls become visible (Mark all as complete checkbox)
#### 1.2
...
</example-spec>
**Quality Standards**:
- Write steps that are specific enough for any tester to follow
- Include negative testing scenarios
- Ensure scenarios are independent and can be run in any order
**Output Format**: Always save the complete test plan as a markdown file with clear headings, numbered steps, and
professional formatting suitable for sharing with development and QA teams.
<example>Context: User wants to test a new e-commerce checkout flow. user: 'I need test scenarios for our new checkout process at https://mystore.com/checkout' assistant: 'I'll use the planner agent to navigate to your checkout page and create comprehensive test scenarios.' <commentary> The user needs test planning for a specific web page, so use the planner agent to explore and create test scenarios. </commentary></example>
<example>Context: User has deployed a new feature and wants thorough testing coverage. user: 'Can you help me test our new user dashboard at https://app.example.com/dashboard?' assistant: 'I'll launch the planner agent to explore your dashboard and develop detailed test scenarios.' <commentary> This requires web exploration and test scenario creation, perfect for the planner agent. </commentary></example>

View File

@@ -1,58 +0,0 @@
---
description: Use this agent when you need to create automated browser tests using Playwright.
tools: ['search/fileSearch', 'search/textSearch', 'search/listDirectory', 'search/readFile', 'playwright-test/browser_click', 'playwright-test/browser_drag', 'playwright-test/browser_evaluate', 'playwright-test/browser_file_upload', 'playwright-test/browser_handle_dialog', 'playwright-test/browser_hover', 'playwright-test/browser_navigate', 'playwright-test/browser_press_key', 'playwright-test/browser_select_option', 'playwright-test/browser_snapshot', 'playwright-test/browser_type', 'playwright-test/browser_verify_element_visible', 'playwright-test/browser_verify_list_visible', 'playwright-test/browser_verify_text_visible', 'playwright-test/browser_verify_value', 'playwright-test/browser_wait_for', 'playwright-test/generator_read_log', 'playwright-test/generator_setup_page', 'playwright-test/generator_write_test']
---
You are a Playwright Test Generator, an expert in browser automation and end-to-end testing.
Your specialty is creating robust, reliable Playwright tests that accurately simulate user interactions and validate
application behavior.
# For each test you generate
- Obtain the test plan with all the steps and verification specification
- Run the `generator_setup_page` tool to set up page for the scenario
- For each step and verification in the scenario, do the following:
- Use Playwright tool to manually execute it in real-time.
- Use the step description as the intent for each Playwright tool call.
- Retrieve generator log via `generator_read_log`
- Immediately after reading the test log, invoke `generator_write_test` with the generated source code
- File should contain single test
- File name must be fs-friendly scenario name
- Test must be placed in a describe matching the top-level test plan item
- Test title must match the scenario name
- Includes a comment with the step text before each step execution. Do not duplicate comments if step requires
multiple actions.
- Always use best practices from the log when generating tests.
<example-generation>
For following plan:
```markdown file=specs/plan.md
### 1. Adding New Todos
**Seed:** `tests/seed.spec.ts`
#### 1.1 Add Valid Todo
**Steps:**
1. Click in the "What needs to be done?" input field
#### 1.2 Add Multiple Todos
...
```
Following file is generated:
```ts file=add-valid-todo.spec.ts
// spec: specs/plan.md
// seed: tests/seed.spec.ts
test.describe('Adding New Todos', () => {
test('Add Valid Todo', async ({ page }) => {
// 1. Click in the "What needs to be done?" input field
await page.click(...);
...
});
});
```
</example-generation>
<example>Context: User wants to test a login flow on their web application. user: 'I need a test that logs into my app at localhost:3000 with username admin@test.com and password 123456, then verifies the dashboard page loads' assistant: 'I'll use the generator agent to create and validate this login test for you' <commentary> The user needs a specific browser automation test created, which is exactly what the generator agent is designed for. </commentary></example>
<example>Context: User has built a new checkout flow and wants to ensure it works correctly. user: 'Can you create a test that adds items to cart, proceeds to checkout, fills in payment details, and confirms the order?' assistant: 'I'll use the generator agent to build a comprehensive checkout flow test' <commentary> This is a complex user journey that needs to be automated and tested, perfect for the generator agent. </commentary></example>

View File

@@ -1,44 +0,0 @@
---
description: Use this agent when you need to debug and fix failing Playwright tests.
tools: ['edit/createFile', 'edit/createDirectory', 'edit/editFiles', 'search/fileSearch', 'search/textSearch', 'search/listDirectory', 'search/readFile', 'playwright-test/browser_console_messages', 'playwright-test/browser_evaluate', 'playwright-test/browser_generate_locator', 'playwright-test/browser_network_requests', 'playwright-test/browser_snapshot', 'playwright-test/test_debug', 'playwright-test/test_list', 'playwright-test/test_run']
---
You are the Playwright Test Healer, an expert test automation engineer specializing in debugging and
resolving Playwright test failures. Your mission is to systematically identify, diagnose, and fix
broken Playwright tests using a methodical approach.
Your workflow:
1. **Initial Execution**: Run all tests using the `playwright-test/test_run` tool to identify failing tests
2. **Debug failed tests**: For each failing test run `playwright-test/test_debug`.
3. **Error Investigation**: When the test pauses on errors, use available Playwright MCP tools to:
- Examine the error details
- Capture page snapshot to understand the context
- Analyze selectors, timing issues, or assertion failures
4. **Root Cause Analysis**: Determine the underlying cause of the failure by examining:
- Element selectors that may have changed
- Timing and synchronization issues
- Data dependencies or test environment problems
- Application changes that broke test assumptions
5. **Code Remediation**: Edit the test code to address identified issues, focusing on:
- Updating selectors to match current application state
- Fixing assertions and expected values
- Improving test reliability and maintainability
- For inherently dynamic data, utilize regular expressions to produce resilient locators
6. **Verification**: Restart the test after each fix to validate the changes
7. **Iteration**: Repeat the investigation and fixing process until the test passes cleanly
Key principles:
- Be systematic and thorough in your debugging approach
- Document your findings and reasoning for each fix
- Prefer robust, maintainable solutions over quick hacks
- Use Playwright best practices for reliable test automation
- If multiple errors exist, fix them one at a time and retest
- Provide clear explanations of what was broken and how you fixed it
- You will continue this process until the test runs successfully without any failures or errors.
- If the error persists and you have high level of confidence that the test is correct, mark this test as test.fixme()
so that it is skipped during the execution. Add a comment before the failing step explaining what is happening instead
of the expected behavior.
- Do not ask user questions; you are not an interactive tool. Do the most reasonable thing possible to pass the test.
- Never wait for networkidle or use other discouraged or deprecated apis
<example>Context: A developer has a failing Playwright test that needs to be debugged and fixed. user: 'The login test is failing, can you fix it?' assistant: 'I'll use the healer agent to debug and fix the failing login test.' <commentary> The user has identified a specific failing test that needs debugging and fixing, which is exactly what the healer agent is designed for. </commentary></example>
<example>Context: After running a test suite, several tests are reported as failing. user: 'Test user-registration.spec.ts is broken after the recent changes' assistant: 'Let me use the healer agent to investigate and fix the user-registration test.' <commentary> A specific test file is failing and needs debugging, which requires the systematic approach of the playwright-test-healer agent. </commentary></example>

View File

@@ -1,11 +0,0 @@
node_modules/
/test-results/
/playwright-report/
/playwright/.cache/
.env
.env.local
dist/
*.log
yarn-error.log
.yarn/cache
.yarn/install-state.gz

View File

@@ -1,257 +0,0 @@
# SigNoz Frontend Automation
E2E tests for SigNoz frontend using Playwright.
## Setup
```bash
# Install dependencies
yarn install
# Install Playwright browsers
yarn install:browsers
# Copy .env.example to .env and configure
cp .env.example .env
# Edit .env with your test credentials
```
## Running Tests
```bash
# Run all tests
yarn test
# Run in UI mode (interactive)
yarn test:ui
# Run in headed mode (see browser)
yarn test:headed
# Debug mode
yarn test:debug
# Run specific browser
yarn test:chromium
yarn test:firefox
yarn test:webkit
# View HTML report
yarn report
# Generate tests with Codegen
yarn codegen
```
## Using Playwright Agents with Cursor
### 🎭 Planner - Create Test Plans
**In Cursor Chat:**
```
@.github/chatmodes/ 🎭 planner.chatmode.md @tests/seed.spec.ts
Follow the planner instructions to create a comprehensive test plan for [feature name]
Save to: specs/[feature-name].md
```
The planner will:
- Use your seed test for context
- Explore the application
- Create a detailed test plan in `specs/[feature].md`
### 🎭 Generator - Generate Tests
**In Cursor Chat:**
```
@.github/chatmodes/🎭 generator.chatmode.md @specs/[feature].md @tests/seed.spec.ts
Follow the generator instructions to generate Playwright tests from the test plan
Save to: tests/[feature]/
```
The generator will:
- Read the test plan
- Create test files in `tests/[feature]/`
- Use proper locators and assertions
- Follow seed.spec.ts patterns
### 🎭 Healer - Fix Failing Tests
**In Cursor Chat:**
```
@.github/chatmodes/🎭 healer.chatmode.md @tests/[feature]/[test].spec.ts
Follow the healer instructions to fix the failing test: [test name]
Error: [paste error message]
```
The healer will:
- Replay failing steps
- Update locators if needed
- Add proper waits
- Re-run until passing
## Directory Structure
```
frontend_automation/
├── .github/
│ └── chatmodes/ # Playwright agent definitions
│ ├── 🎭 planner.chatmode.md
│ ├── 🎭 generator.chatmode.md
│ └── 🎭 healer.chatmode.md
├── .vscode/
│ └── mcp.json # MCP server config
├── specs/ # Test plans (Markdown)
│ └── example-test-plan.md
├── tests/ # Test files (.spec.ts)
│ └── seed.spec.ts
├── utils/ # Utilities and helpers
│ └── login.util.ts
├── .env # Environment variables (git-ignored)
├── .env.example # Environment template
├── .gitignore
├── package.json
├── playwright.config.ts # Playwright configuration
├── tsconfig.json # TypeScript configuration
├── yarn.lock # Yarn lock file
└── README.md
```
## Environment Variables
| Variable | Description | Example |
|----------|-------------|---------|
| `SIGNOZ_E2E_BASE_URL` | Base URL of the application | `https://app.us.staging.signoz.cloud` |
| `SIGNOZ_E2E_USERNAME` | Test user email | `test@example.com` |
| `SIGNOZ_E2E_PASSWORD` | Test user password | `your-password` |
| `SIGNOZ_E2E_API_URL` | API endpoint (optional) | `https://api.us.staging.signoz.cloud` |
## Workflow Example
### Complete Test Creation Flow
```bash
# 1. In Cursor Chat, create test plan
@.github/chatmodes/ 🎭 planner.chatmode.md @tests/seed.spec.ts
Create a test plan for: routing policies feature
Save to: specs/routing-policies.md
# 2. Review the generated plan in specs/routing-policies.md
# Edit if needed
# 3. Generate tests from the plan
@.github/chatmodes/🎭 generator.chatmode.md @specs/routing-policies.md @tests/seed.spec.ts
Generate tests and save to: tests/routing-policies/
# 4. Run the tests
yarn test:ui
# 5. If any test fails, heal it
@.github/chatmodes/🎭 healer.chatmode.md @tests/routing-policies/[failing-test].spec.ts
Fix the failing test
# 6. Re-run to verify
yarn test
```
## CI/CD Integration
Example GitHub Actions workflow:
```yaml
# .github/workflows/e2e.yml
name: E2E Tests
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
with:
node-version: 18
cache: 'yarn'
- name: Install dependencies
run: yarn install
working-directory: frontend_automation
- name: Install Playwright browsers
run: yarn install:browsers
working-directory: frontend_automation
- name: Run tests
run: yarn test
working-directory: frontend_automation
env:
SIGNOZ_E2E_BASE_URL: ${{ secrets.E2E_BASE_URL }}
SIGNOZ_E2E_USERNAME: ${{ secrets.E2E_USERNAME }}
SIGNOZ_E2E_PASSWORD: ${{ secrets.E2E_PASSWORD }}
- uses: actions/upload-artifact@v3
if: always()
with:
name: playwright-report
path: frontend_automation/playwright-report/
retention-days: 30
```
## Best Practices
1. **Start with Seed Test** - Always reference `seed.spec.ts` for patterns
2. **Review Generated Plans** - Edit test plans before generating tests
3. **Use Semantic Locators** - Prefer `getByRole`, `getByLabel` over CSS selectors
4. **Keep Plans Updated** - Update `specs/` when features change
5. **Let Healer Work** - The healer can fix most locator and timing issues
6. **Write Descriptive Tests** - Use clear test names and comments
## Troubleshooting
### Tests Won't Run
- Check `.env` has correct credentials
- Verify `baseURL` is accessible
- Run `yarn test:debug` for detailed output
### Locators Failing
- Use the healer agent to fix them
- Or use Playwright Inspector: `yarn test:debug`
- Check if UI elements have changed
### Authentication Issues
- Verify `ensureLoggedIn()` function works
- Check credentials in `.env`
- Run seed test independently: `yarn test tests/seed.spec.ts`
### Agents Not Working in Cursor
- Ensure you're using `@` to reference chatmode files
- Include seed test in context
- Follow the agent instructions explicitly
## Resources
- [Playwright Documentation](https://playwright.dev)
- [Playwright Agents](https://playwright.dev/docs/test-agents)
- [Playwright Best Practices](https://playwright.dev/docs/best-practices)
- [TypeScript Handbook](https://www.typescriptlang.org/docs/)
## Contributing
When adding new tests:
1. Create a test plan in `specs/` first
2. Use agents to generate tests
3. Review and refine generated code
4. Ensure tests follow existing patterns
5. Add proper documentation
## License
MIT

View File

@@ -1,36 +0,0 @@
{
"name": "signoz-frontend-automation",
"version": "1.0.0",
"description": "E2E tests for SigNoz frontend with Playwright",
"main": "index.js",
"scripts": {
"test": "playwright test",
"test:ui": "playwright test --ui",
"test:headed": "playwright test --headed",
"test:debug": "playwright test --debug",
"test:chromium": "playwright test --project=chromium",
"test:firefox": "playwright test --project=firefox",
"test:webkit": "playwright test --project=webkit",
"report": "playwright show-report",
"codegen": "playwright codegen",
"install:browsers": "playwright install"
},
"keywords": [
"playwright",
"e2e",
"testing",
"signoz"
],
"author": "",
"license": "MIT",
"devDependencies": {
"@playwright/test": "^1.57.0-alpha-2025-10-09",
"@types/node": "^20.0.0",
"dotenv": "^16.0.0",
"typescript": "^5.0.0"
},
"engines": {
"node": ">=18.0.0",
"yarn": ">=1.22.0"
}
}

View File

@@ -1,57 +0,0 @@
import { defineConfig, devices } from "@playwright/test";
import dotenv from "dotenv";
import path from "path";
// Load environment variables
dotenv.config({ path: path.resolve(__dirname, ".env") });
// Truthy CI env var switches on stricter, retry-friendly settings.
const isCI = !!process.env.CI;

// Target deployment; falls back to the US staging environment.
const baseURL =
  process.env.SIGNOZ_E2E_BASE_URL || "https://app.us.staging.signoz.cloud";

export default defineConfig({
  testDir: "./tests",
  // Run test files in parallel.
  fullyParallel: true,
  // Fail the CI build if a stray test.only was committed.
  forbidOnly: isCI,
  // Retry flaky tests on CI only.
  retries: isCI ? 2 : 0,
  // Cap workers on CI; let Playwright pick locally.
  workers: isCI ? 2 : undefined,
  // HTML report, machine-readable JSON, and console list output.
  reporter: [
    ["html"],
    ["json", { outputFile: "test-results/results.json" }],
    ["list"],
  ],
  // Settings shared by every project.
  use: {
    baseURL,
    trace: "on-first-retry",
    screenshot: "only-on-failure",
    video: "retain-on-failure",
    colorScheme: "dark",
    locale: "en-US",
    viewport: { width: 1280, height: 720 },
  },
  // One project per browser engine.
  projects: [
    { name: "chromium", use: { ...devices["Desktop Chrome"] } },
    { name: "firefox", use: { ...devices["Desktop Firefox"] } },
    { name: "webkit", use: { ...devices["Desktop Safari"] } },
  ],
});

View File

@@ -1,76 +0,0 @@
# Example Feature - Test Plan Template
## Application Overview
[Describe the feature/module being tested. Include key functionality, user flows, and important business logic.]
Example:
> The Routing Policies feature allows users to create, edit, and manage alert routing configurations. Users can define rules that determine how alerts are routed to different channels based on conditions like severity, labels, or alert names.
## Test Scenarios
### 1. [Main Scenario Category]
**Seed:** `tests/seed.spec.ts`
#### 1.1 [Specific Test Case]
**Pre-conditions:**
- User is logged in (handled by seed test)
- [Any other specific setup needed]
**Steps:**
1. Navigate to [specific page/section]
2. Click on [element description]
3. Fill in [field] with "[test data]"
4. Click [button/action]
5. Verify [expected outcome]
**Expected Results:**
- [Expected UI change or behavior]
- [Expected data state]
- [Expected navigation or feedback]
**Data:**
- Input field: "test value"
- Select option: "option name"
#### 1.2 [Another Test Case]
**Steps:**
1. ...
**Expected Results:**
- ...
### 2. [Another Scenario Category]
#### 2.1 [Test Case]
**Steps:**
1. ...
**Expected Results:**
- ...
## Edge Cases
### 3. Error Handling
#### 3.1 Invalid Input
**Steps:**
1. Enter invalid data
2. Attempt to submit
**Expected Results:**
- Error message displayed
- Form not submitted
- User remains on page
## Notes
- [Any special considerations]
- [Known limitations]
- [Areas requiring manual verification]

View File

@@ -1,19 +0,0 @@
import { test, expect } from "@playwright/test";
import { ensureLoggedIn } from "../utils/login.util";
/**
* Seed test for Playwright Agents
*
* This test serves as:
* 1. A foundation for all agent-generated tests
* 2. An example of test structure and patterns
* 3. Initial setup for authentication
*/
test("seed", async ({ page }) => {
  // Expected post-login state.
  const homeUrlPattern = /.*\/home/;
  const welcomeText = "Hello there, Welcome to your";

  // Authenticate (no-op when already on the home page).
  await ensureLoggedIn(page);

  // Confirm we landed on the home page with the greeting visible.
  await expect(page).toHaveURL(homeUrlPattern);
  await expect(page.getByText(welcomeText)).toBeVisible();
});

View File

@@ -1,24 +0,0 @@
{
"compilerOptions": {
"target": "ES2020",
"module": "commonjs",
"moduleResolution": "node",
"lib": ["ES2020"],
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"types": ["node", "@playwright/test"],
"baseUrl": ".",
"paths": {
"@tests/*": ["tests/*"],
"@utils/*": ["utils/*"],
"@specs/*": ["specs/*"]
},
"outDir": "./dist",
"rootDir": "."
},
"include": ["tests/**/*.ts", "utils/**/*.ts", "playwright.config.ts"],
"exclude": ["node_modules", "dist"]
}

View File

@@ -1,33 +0,0 @@
import { Page } from "@playwright/test";
// Read credentials from environment variables
const username = process.env.SIGNOZ_E2E_USERNAME;
const password = process.env.SIGNOZ_E2E_PASSWORD;
/**
* Ensures the user is logged in. If not, performs the login steps.
*/
/**
 * Ensures the user is logged in, performing the full login flow unless the
 * page is already on /home. Requires SIGNOZ_E2E_USERNAME and
 * SIGNOZ_E2E_PASSWORD to be set in the environment.
 */
export async function ensureLoggedIn(page: Page): Promise<void> {
  // Already on the home page — assume an authenticated session.
  if (page.url().includes("/home")) {
    return;
  }

  if (!username || !password) {
    throw new Error(
      "SIGNOZ_E2E_USERNAME and SIGNOZ_E2E_PASSWORD environment variables must be set."
    );
  }

  await page.goto("/login");

  // Two-step login: email first, then password.
  const emailField = page.getByTestId("email");
  await emailField.click();
  await emailField.fill(username);
  await page.getByTestId("initiate_login").click();

  const passwordField = page.getByTestId("password");
  await passwordField.click();
  await passwordField.fill(password);
  await page.getByRole("button", { name: "Login" }).click();

  // The home-page greeting confirms the login succeeded.
  const greeting = page.getByText("Hello there, Welcome to your");
  await greeting.waitFor({ state: "visible" });
}

View File

@@ -1,51 +0,0 @@
# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@playwright/test@^1.57.0-alpha-2025-10-11":
version "1.57.0-alpha-2025-10-11"
resolved "https://registry.yarnpkg.com/@playwright/test/-/test-1.57.0-alpha-2025-10-11.tgz#75f6fac2f98fcff6e4bae1c907b48ad0b1a33bea"
integrity sha512-xqp2RNcLCPSUAYCrP3+rYZ4LFlESvWqjjpFegjNbun7wLcGvUt9Mh+RHBvgeZAhMxxuVde78XO9Y888UYFH9ew==
dependencies:
playwright "1.57.0-alpha-2025-10-11"
"@types/node@^24.7.1":
version "24.7.1"
resolved "https://registry.yarnpkg.com/@types/node/-/node-24.7.1.tgz#3f0b17eddcd965c9e337af22459b04bafbf96e5e"
integrity sha512-CmyhGZanP88uuC5GpWU9q+fI61j2SkhO3UGMUdfYRE6Bcy0ccyzn1Rqj9YAB/ZY4kOXmNf0ocah5GtphmLMP6Q==
dependencies:
undici-types "~7.14.0"
dotenv@^17.2.3:
version "17.2.3"
resolved "https://registry.yarnpkg.com/dotenv/-/dotenv-17.2.3.tgz#ad995d6997f639b11065f419a22fabf567cdb9a2"
integrity sha512-JVUnt+DUIzu87TABbhPmNfVdBDt18BLOWjMUFJMSi/Qqg7NTYtabbvSNJGOJ7afbRuv9D/lngizHtP7QyLQ+9w==
fsevents@2.3.2:
version "2.3.2"
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
playwright-core@1.57.0-alpha-2025-10-11:
version "1.57.0-alpha-2025-10-11"
resolved "https://registry.yarnpkg.com/playwright-core/-/playwright-core-1.57.0-alpha-2025-10-11.tgz#b444b75542a43bc354e512d076344d9258997e9c"
integrity sha512-X6KAunryZlslAdEdlN5gIIP3sFU6Uot3vzLoGCZ9SNv0JvXd6e2g7ArjnpOQld36yKszq8J+wQJRlIvdXkIvRw==
playwright@1.57.0-alpha-2025-10-11:
version "1.57.0-alpha-2025-10-11"
resolved "https://registry.yarnpkg.com/playwright/-/playwright-1.57.0-alpha-2025-10-11.tgz#af67a28d64b39fe57eea454b2b312e98bfff02a5"
integrity sha512-a80kAd59up/kURcKE7THLzx3lN6a1G9RhsgP9ZfLGL7WtnOhOdRLxbHwmjWUG11ybEDeYNpj1qwT02MT4R+rew==
dependencies:
playwright-core "1.57.0-alpha-2025-10-11"
optionalDependencies:
fsevents "2.3.2"
typescript@^5.9.3:
version "5.9.3"
resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.9.3.tgz#5b4f59e15310ab17a216f5d6cf53ee476ede670f"
integrity sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==
undici-types@~7.14.0:
version "7.14.0"
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-7.14.0.tgz#4c037b32ca4d7d62fae042174604341588bc0840"
integrity sha512-QQiYxHuyZ9gQUIrmPo3IA+hUl4KYk8uSA7cHrcKd/l3p1OTpZcM0Tbp9x7FAtXdAYhlasd60ncPpgu6ihG6TOA==

View File

@@ -5,6 +5,7 @@ import (
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
)
@@ -15,11 +16,14 @@ type AuthZ interface {
Check(context.Context, *openfgav1.TupleKey) error
// CheckWithTupleCreation takes upon the responsibility for generating the tuples alongside everything Check does.
CheckWithTupleCreation(context.Context, authtypes.Claims, authtypes.Relation, authtypes.Typeable, []authtypes.Selector) error
CheckWithTupleCreation(context.Context, authtypes.Claims, valuer.UUID, authtypes.Relation, authtypes.Relation, authtypes.Typeable, []authtypes.Selector) error
// writes the tuples to upstream server
Write(context.Context, *openfgav1.WriteRequest) error
// Batch Check returns error when the upstream authorization server is unavailable or for all the tuples of subject (s) doesn't have relation (r) on object (o).
BatchCheck(context.Context, []*openfgav1.TupleKey) error
// lists the selectors for objects assigned to subject (s) with relation (r) on resource (s)
// Write accepts the insertion tuples and the deletion tuples.
Write(context.Context, []*openfgav1.TupleKey, []*openfgav1.TupleKey) error
// Lists the selectors for objects assigned to subject (s) with relation (r) on resource (s)
ListObjects(context.Context, string, authtypes.Relation, authtypes.Typeable) ([]*authtypes.Object, error)
}

View File

@@ -232,13 +232,13 @@ func (provider *provider) BatchCheck(ctx context.Context, tupleReq []*openfgav1.
}
func (provider *provider) CheckWithTupleCreation(ctx context.Context, claims authtypes.Claims, relation authtypes.Relation, typeable authtypes.Typeable, selectors []authtypes.Selector) error {
func (provider *provider) CheckWithTupleCreation(ctx context.Context, claims authtypes.Claims, orgID valuer.UUID, _ authtypes.Relation, translation authtypes.Relation, _ authtypes.Typeable, _ []authtypes.Selector) error {
subject, err := authtypes.NewSubject(authtypes.TypeUser, claims.UserID, authtypes.Relation{})
if err != nil {
return err
}
tuples, err := typeable.Tuples(subject, relation, selectors)
tuples, err := authtypes.TypeableOrganization.Tuples(subject, translation, []authtypes.Selector{authtypes.MustNewSelector(authtypes.TypeOrganization, orgID.StringValue())}, orgID)
if err != nil {
return err
}
@@ -251,11 +251,21 @@ func (provider *provider) CheckWithTupleCreation(ctx context.Context, claims aut
return nil
}
func (provider *provider) Write(ctx context.Context, req *openfgav1.WriteRequest) error {
func (provider *provider) Write(ctx context.Context, additions []*openfgav1.TupleKey, deletions []*openfgav1.TupleKey) error {
deletionTuplesWithoutCondition := make([]*openfgav1.TupleKeyWithoutCondition, len(deletions))
for idx, tuple := range deletions {
deletionTuplesWithoutCondition[idx] = &openfgav1.TupleKeyWithoutCondition{User: tuple.User, Object: tuple.Object, Relation: tuple.Relation}
}
_, err := provider.openfgaServer.Write(ctx, &openfgav1.WriteRequest{
StoreId: provider.storeID,
AuthorizationModelId: provider.modelID,
Writes: req.Writes,
Writes: &openfgav1.WriteRequestWrites{
TupleKeys: additions,
},
Deletes: &openfgav1.WriteRequestDeletes{
TupleKeys: deletionTuplesWithoutCondition,
},
})
return err

View File

@@ -7,6 +7,7 @@ import (
"github.com/SigNoz/signoz/pkg/authz"
"github.com/SigNoz/signoz/pkg/http/render"
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/gorilla/mux"
)
@@ -106,7 +107,7 @@ func (middleware *AuthZ) OpenAccess(next http.HandlerFunc) http.HandlerFunc {
})
}
func (middleware *AuthZ) Check(next http.HandlerFunc, _ authtypes.Relation, translation authtypes.Relation, _ authtypes.Typeable, _ authtypes.Typeable, _ authtypes.SelectorCallbackFn) http.HandlerFunc {
func (middleware *AuthZ) Check(next http.HandlerFunc, relation authtypes.Relation, translation authtypes.Relation, typeable authtypes.Typeable, cb authtypes.SelectorCallbackFn) http.HandlerFunc {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
claims, err := authtypes.ClaimsFromContext(req.Context())
if err != nil {
@@ -114,7 +115,19 @@ func (middleware *AuthZ) Check(next http.HandlerFunc, _ authtypes.Relation, tran
return
}
err = middleware.authzService.CheckWithTupleCreation(req.Context(), claims, translation, authtypes.TypeableOrganization, []authtypes.Selector{authtypes.MustNewSelector(authtypes.TypeOrganization, claims.OrgID)})
orgId, err := valuer.NewUUID(claims.OrgID)
if err != nil {
render.Error(rw, err)
return
}
selectors, err := cb(req.Context(), claims)
if err != nil {
render.Error(rw, err)
return
}
err = middleware.authzService.CheckWithTupleCreation(req.Context(), claims, orgId, relation, translation, typeable, selectors)
if err != nil {
render.Error(rw, err)
return

View File

@@ -27,6 +27,7 @@ type Module interface {
GetByMetricNames(ctx context.Context, orgID valuer.UUID, metricNames []string) (map[string][]map[string]string, error)
statsreporter.StatsCollector
role.RegisterTypeable
}

View File

@@ -225,5 +225,5 @@ func (module *module) Collect(ctx context.Context, orgID valuer.UUID) (map[strin
}
func (module *module) MustGetTypeables() []authtypes.Typeable {
return []authtypes.Typeable{dashboardtypes.ResourceDashboard, dashboardtypes.ResourcesDashboards}
return []authtypes.Typeable{dashboardtypes.TypeableResourceDashboard, dashboardtypes.TypeableResourcesDashboards}
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/SigNoz/signoz/pkg/types/authtypes"
"github.com/SigNoz/signoz/pkg/types/roletypes"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
)
type module struct {
@@ -47,6 +46,8 @@ func (module *module) GetResources(_ context.Context) []*authtypes.Resource {
for _, register := range module.registry {
typeables = append(typeables, register.MustGetTypeables()...)
}
// role module cannot self register itself!
typeables = append(typeables, module.MustGetTypeables()...)
resources := make([]*authtypes.Resource, 0)
for _, typeable := range typeables {
@@ -142,24 +143,17 @@ func (module *module) Patch(ctx context.Context, orgID valuer.UUID, id valuer.UU
}
func (module *module) PatchObjects(ctx context.Context, orgID valuer.UUID, id valuer.UUID, relation authtypes.Relation, additions, deletions []*authtypes.Object) error {
additionTuples, err := roletypes.GetAdditionTuples(id, relation, additions)
additionTuples, err := roletypes.GetAdditionTuples(id, orgID, relation, additions)
if err != nil {
return err
}
deletionTuples, err := roletypes.GetDeletionTuples(id, relation, deletions)
deletionTuples, err := roletypes.GetDeletionTuples(id, orgID, relation, deletions)
if err != nil {
return err
}
err = module.authz.Write(ctx, &openfgav1.WriteRequest{
Writes: &openfgav1.WriteRequestWrites{
TupleKeys: additionTuples,
},
Deletes: &openfgav1.WriteRequestDeletes{
TupleKeys: deletionTuples,
},
})
err = module.authz.Write(ctx, additionTuples, deletionTuples)
if err != nil {
return err
}
@@ -170,3 +164,7 @@ func (module *module) PatchObjects(ctx context.Context, orgID valuer.UUID, id va
func (module *module) Delete(ctx context.Context, orgID valuer.UUID, id valuer.UUID) error {
return module.store.Delete(ctx, orgID, id)
}
func (module *module) MustGetTypeables() []authtypes.Typeable {
return []authtypes.Typeable{authtypes.TypeableRole, roletypes.TypeableResourcesRoles}
}

View File

@@ -33,6 +33,8 @@ type Module interface {
// Deletes the role metadata and tuples in authorization server
Delete(context.Context, valuer.UUID, valuer.UUID) error
RegisterTypeable
}
type RegisterTypeable interface {
@@ -40,27 +42,19 @@ type RegisterTypeable interface {
}
type Handler interface {
// Creates the role metadata and tuples in authorization server
Create(http.ResponseWriter, *http.Request)
// Gets the role metadata
Get(http.ResponseWriter, *http.Request)
// Gets the objects for the given relation and role
GetObjects(http.ResponseWriter, *http.Request)
// Gets all the resources and the relations
GetResources(http.ResponseWriter, *http.Request)
// Lists all the roles metadata for the organization
List(http.ResponseWriter, *http.Request)
// Patches the role metadata
Patch(http.ResponseWriter, *http.Request)
// Patches the objects for the given relation and role
PatchObjects(http.ResponseWriter, *http.Request)
// Deletes the role metadata and tuples in authorization server
Delete(http.ResponseWriter, *http.Request)
}

View File

@@ -94,7 +94,7 @@ func CollisionHandledFinalExpr(
return "", nil, err
}
colName, _ = fm.FieldFor(ctx, key)
colName, _ = telemetrytypes.DataTypeCollisionHandledFieldName(key, dummyValue, colName)
colName, _ = DataTypeCollisionHandledFieldName(key, dummyValue, colName, qbtypes.FilterOperatorUnknown)
stmts = append(stmts, colName)
}
}
@@ -109,7 +109,7 @@ func CollisionHandledFinalExpr(
return "", nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "Group by/Aggregation isn't available for the body column")
// colName, _ = jsonKeyToKey(context.Background(), field, qbtypes.FilterOperatorUnknown, dummyValue)
} else {
colName, _ = telemetrytypes.DataTypeCollisionHandledFieldName(field, dummyValue, colName)
colName, _ = DataTypeCollisionHandledFieldName(field, dummyValue, colName, qbtypes.FilterOperatorUnknown)
}
stmts = append(stmts, colName)
@@ -194,3 +194,109 @@ func FormatFullTextSearch(input string) string {
}
return input
}
// DataTypeCollisionHandledFieldName handles data-type collisions between a
// key's stored data type and the type of the value being filtered on.
//
// We don't want to fail the request when a key exists with more than one data
// type. Example: `http.status_code` may have been ingested both as a string
// and as a number. When the user searches `http.status_code=200`, we search
// across both the number and string columns and return results from both.
// While users are expected not to send mixed data types, it inevitably
// happens, so the mismatching column is wrapped in a cast (and/or the value
// converted) here.
//
// Returns the possibly cast-wrapped field expression and the possibly
// string-converted value.
func DataTypeCollisionHandledFieldName(key *telemetrytypes.TelemetryFieldKey, value any, tblFieldName string, operator qbtypes.FilterOperator) (string, any) {
	switch key.FieldDataType {
	case telemetrytypes.FieldDataTypeString:
		switch v := value.(type) {
		case float64:
			// Numeric value against a string column: try to convert the
			// string column to a number (NULL when not parseable).
			tblFieldName = castFloat(tblFieldName)
		case []any:
			if allFloats(v) {
				tblFieldName = castFloat(tblFieldName)
			} else if hasString(v) {
				// Mixed array: keep the string column as-is and stringify
				// every element of the value list instead.
				_, value = castString(tblFieldName), toStrings(v)
			}
		case bool:
			// we don't have a toBoolOrNull in ClickHouse, so we need to
			// convert the bool to a string
			value = fmt.Sprintf("%t", v)
		}
	case telemetrytypes.FieldDataTypeFloat64, telemetrytypes.FieldDataTypeInt64, telemetrytypes.FieldDataTypeNumber:
		switch v := value.(type) {
		// why? ; CH returns an error for a simple check
		// attributes_number['http.status_code'] = 200 but not for attributes_number['http.status_code'] >= 200
		// DB::Exception: Bad get: has UInt64, requested Float64.
		// How is it working in v4? v4 prepares the full query with values in query string
		// When we format the float it becomes attributes_number['http.status_code'] = 200.000
		// Which CH gladly accepts and doesn't throw error
		// However, when passed as query args, the default formatter
		// https://github.com/ClickHouse/clickhouse-go/blob/757e102f6d8c6059d564ce98795b4ce2a101b1a5/bind.go#L393
		// is used which prepares the
		// final query as attributes_number['http.status_code'] = 200 giving this error
		// The following is one way to work around it:
		// if the key is a number and the value is a string, we let ClickHouse handle the conversion
		case float32, float64:
			tblFieldName = castFloatHack(tblFieldName)
		case string:
			// check if it's a number inside a string
			isNumber := false
			if _, err := strconv.ParseFloat(v, 64); err == nil {
				isNumber = true
			}

			if !operator.IsComparisonOperator() || !isNumber {
				// try to convert the number attribute to string
				tblFieldName = castString(tblFieldName) // numeric col vs string literal
			} else {
				// Comparison against a string-encoded number: compare
				// numerically rather than lexicographically.
				tblFieldName = castFloatHack(tblFieldName)
			}
		case []any:
			if allFloats(v) {
				tblFieldName = castFloatHack(tblFieldName)
			} else if hasString(v) {
				tblFieldName, value = castString(tblFieldName), toStrings(v)
			}
		}
	case telemetrytypes.FieldDataTypeBool:
		switch v := value.(type) {
		case string:
			// String value against a bool column: compare both as strings.
			tblFieldName = castString(tblFieldName)
		case []any:
			if hasString(v) {
				tblFieldName, value = castString(tblFieldName), toStrings(v)
			}
		}
	}

	return tblFieldName, value
}
// castFloat wraps a column expression in toFloat64OrNull, so values that
// cannot be parsed as numbers become NULL instead of raising an error.
func castFloat(col string) string {
	return fmt.Sprint("toFloat64OrNull(", col, ")")
}
// castFloatHack wraps a column expression in a plain toFloat64 cast; unlike
// castFloat this errors on unparseable values (see the workaround comments in
// DataTypeCollisionHandledFieldName for why it is used for comparisons).
func castFloatHack(col string) string {
	return fmt.Sprint("toFloat64(", col, ")")
}
// castString wraps a column expression in toString so it can be compared
// against string literals.
func castString(col string) string {
	return fmt.Sprint("toString(", col, ")")
}
// allFloats reports whether every element of in is a float64.
// An empty (or nil) slice vacuously returns true.
func allFloats(in []any) bool {
	for _, item := range in {
		switch item.(type) {
		case float64:
			// keep scanning
		default:
			return false
		}
	}
	return true
}
// hasString reports whether at least one element of in is a string.
func hasString(in []any) bool {
	for _, item := range in {
		switch item.(type) {
		case string:
			return true
		}
	}
	return false
}
// toStrings renders every element of in with its default fmt formatting,
// returning a new slice; the input slice is not modified.
func toStrings(in []any) []any {
	converted := make([]any, 0, len(in))
	for _, item := range in {
		converted = append(converted, fmt.Sprint(item))
	}
	return converted
}

View File

@@ -0,0 +1,165 @@
package querybuilder
import (
"testing"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/stretchr/testify/assert"
)
// TestDataTypeCollisionHandledFieldName covers the data-type collision
// handling: when the stored data type of a key (string / number / bool) does
// not match the type of the value being filtered on, the field expression is
// wrapped in a cast and/or the value is converted so the comparison still
// works.
func TestDataTypeCollisionHandledFieldName(t *testing.T) {
	tests := []struct {
		name              string
		key               *telemetrytypes.TelemetryFieldKey
		value             any
		tblFieldName      string
		expectedFieldName string
		expectedValue     any
		operator          qbtypes.FilterOperator // zero value means FilterOperatorUnknown
	}{
		// strings
		{
			name: "http_status_code_string_field_with_numeric_value",
			key: &telemetrytypes.TelemetryFieldKey{
				Name:          "http.status_code",
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			value:             float64(200),
			tblFieldName:      "attribute_string_http$$status_code",
			expectedFieldName: "toFloat64OrNull(attribute_string_http$$status_code)",
			expectedValue:     float64(200),
		},
		{
			name: "service_enabled_string_field_with_bool_value",
			key: &telemetrytypes.TelemetryFieldKey{
				Name:          "service.enabled",
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			value:             true,
			tblFieldName:      "attribute_string_service$$enabled",
			expectedFieldName: "attribute_string_service$$enabled",
			expectedValue:     "true",
		},
		{
			name: "http_method_string_field_with_string_value",
			key: &telemetrytypes.TelemetryFieldKey{
				Name:          "http.method",
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			value:             "GET",
			tblFieldName:      "attribute_string_http$$method",
			expectedFieldName: "attribute_string_http$$method",
			expectedValue:     "GET",
		},
		{
			name: "response_times_string_field_with_numeric_array",
			key: &telemetrytypes.TelemetryFieldKey{
				Name:          "response.times",
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			value:             []any{float64(100.5), float64(200.3), float64(150.7)},
			tblFieldName:      "attribute_string_response$$times",
			expectedFieldName: "toFloat64OrNull(attribute_string_response$$times)",
			expectedValue:     []any{float64(100.5), float64(200.3), float64(150.7)},
		},
		{
			name: "error_codes_string_field_with_mixed_array",
			key: &telemetrytypes.TelemetryFieldKey{
				Name:          "error.codes",
				FieldDataType: telemetrytypes.FieldDataTypeString,
			},
			value:             []any{float64(500), "TIMEOUT", float64(503)},
			tblFieldName:      "attribute_string_error$$codes",
			expectedFieldName: "attribute_string_error$$codes",
			expectedValue:     []any{"500", "TIMEOUT", "503"},
		},
		// numbers
		{
			// we cast the key to string if the value is not a number or operator is not a comparison operator
			name: "http_request_duration_float_field_with_string_value",
			key: &telemetrytypes.TelemetryFieldKey{
				Name:          "http.request.duration",
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			value:             "1234.56",
			tblFieldName:      "attribute_float64_http$$request$$duration",
			expectedFieldName: "toString(attribute_float64_http$$request$$duration)",
			expectedValue:     "1234.56",
			operator:          qbtypes.FilterOperatorEqual,
		},
		{
			// we cast to float64 if it's a comparison operator and the value is a stringified number
			// reason:- https://github.com/SigNoz/signoz/pull/9154#issuecomment-3369941207
			name: "http_request_duration_float_field_with_string_value_comparison_operator",
			key: &telemetrytypes.TelemetryFieldKey{
				Name:          "http.request.duration",
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			value:             "9",
			tblFieldName:      "attribute_float64_http$$request$$duration",
			expectedFieldName: "toFloat64(attribute_float64_http$$request$$duration)",
			expectedValue:     "9",
			operator:          qbtypes.FilterOperatorGreaterThan,
		},
		{
			// we cast to float64 if it's a comparison operator and the value is a stringified number
			// reason:- https://github.com/SigNoz/signoz/pull/9154#issuecomment-3369941207
			name: "http_request_duration_float_field_with_string_value_comparison_operator_1",
			key: &telemetrytypes.TelemetryFieldKey{
				Name:          "http.request.duration",
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			value:             "9.11",
			tblFieldName:      "attribute_float64_http$$request$$duration",
			expectedFieldName: "toFloat64(attribute_float64_http$$request$$duration)",
			expectedValue:     "9.11",
			operator:          qbtypes.FilterOperatorGreaterThan,
		},
		{
			// we cast the key to string if the value is not a number or operator is not a comparison operator
			name: "http_request_duration_float_field_with_string_value_comparison_operator_2",
			key: &telemetrytypes.TelemetryFieldKey{
				Name:          "http.request.duration",
				FieldDataType: telemetrytypes.FieldDataTypeFloat64,
			},
			value:             "ERROR",
			tblFieldName:      "attribute_float64_http$$request$$duration",
			expectedFieldName: "toString(attribute_float64_http$$request$$duration)",
			expectedValue:     "ERROR",
			operator:          qbtypes.FilterOperatorGreaterThan,
		},
		// bools
		{
			name: "feature_enabled_bool_field_with_string_value",
			key: &telemetrytypes.TelemetryFieldKey{
				Name:          "feature.enabled",
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			value:             "true",
			tblFieldName:      "attribute_bool_feature$$enabled",
			expectedFieldName: "toString(attribute_bool_feature$$enabled)",
			expectedValue:     "true",
		},
		{
			name: "feature_flags_bool_field_with_mixed_array",
			key: &telemetrytypes.TelemetryFieldKey{
				Name:          "feature.flags",
				FieldDataType: telemetrytypes.FieldDataTypeBool,
			},
			value:             []any{true, "enabled", false},
			tblFieldName:      "attribute_bool_feature$$flags",
			expectedFieldName: "toString(attribute_bool_feature$$flags)",
			expectedValue:     []any{"true", "enabled", "false"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Both the (possibly cast-wrapped) field name and the (possibly
			// converted) value are asserted.
			resultFieldName, resultValue := DataTypeCollisionHandledFieldName(tt.key, tt.value, tt.tblFieldName, tt.operator)
			assert.Equal(t, tt.expectedFieldName, resultFieldName)
			assert.Equal(t, tt.expectedValue, resultValue)
		})
	}
}

View File

@@ -55,7 +55,7 @@ func (c *conditionBuilder) conditionFor(
tblFieldName, value = GetBodyJSONKey(ctx, key, operator, value)
}
tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName)
tblFieldName, value = querybuilder.DataTypeCollisionHandledFieldName(key, value, tblFieldName, operator)
// make use of case insensitive index for body
if tblFieldName == "body" {

View File

@@ -424,6 +424,16 @@ func TestFilterExprLogs(t *testing.T) {
expectedArgs: []any{"critical", "resolved", "open", true},
expectedErrorContains: "",
},
{
// this will result in failure from the DB side.
// user will have to use attribute.status:string > open
category: "FREETEXT with conditions",
query: "critical NOT resolved status > open",
shouldPass: true,
expectedQuery: "WHERE (match(LOWER(body), LOWER(?)) AND NOT (match(LOWER(body), LOWER(?))) AND (toString(attributes_number['status']) > ? AND mapContains(attributes_number, 'status') = ?))",
expectedArgs: []any{"critical", "resolved", "open", true},
expectedErrorContains: "",
},
{
category: "FREETEXT with conditions",
query: "database error type=mysql",

View File

@@ -55,7 +55,7 @@ func (c *conditionBuilder) ConditionFor(
return "", nil
}
tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName)
tblFieldName, value = querybuilder.DataTypeCollisionHandledFieldName(key, value, tblFieldName, operator)
// key must exists to apply main filter
expr := `if(mapContains(%s, %s), %s, true)`

View File

@@ -73,7 +73,7 @@ func (c *conditionBuilder) conditionFor(
}
}
} else {
tblFieldName, value = telemetrytypes.DataTypeCollisionHandledFieldName(key, value, tblFieldName)
tblFieldName, value = querybuilder.DataTypeCollisionHandledFieldName(key, value, tblFieldName, operator)
}
// regular operators

View File

@@ -17,7 +17,8 @@ var (
typeRoleSelectorRegex = regexp.MustCompile(`^[0-9a-f]{8}(?:\-[0-9a-f]{4}){3}-[0-9a-f]{12}$`)
typeOrganizationSelectorRegex = regexp.MustCompile(`^[0-9a-f]{8}(?:\-[0-9a-f]{4}){3}-[0-9a-f]{12}$`)
typeResourceSelectorRegex = regexp.MustCompile(`^[0-9a-f]{8}(?:\-[0-9a-f]{4}){3}-[0-9a-f]{12}$`)
typeResourcesSelectorRegex = regexp.MustCompile(`^org/[0-9a-f]{8}(?:\-[0-9a-f]{4}){3}-[0-9a-f]{12}$`)
// resources selectors are used to select either all or none
typeResourcesSelectorRegex = regexp.MustCompile(`^\*$`)
)
type SelectorCallbackFn func(context.Context, Claims) ([]Selector, error)
@@ -27,7 +28,7 @@ type Selector struct {
}
func NewSelector(typed Type, selector string) (Selector, error) {
err := IsValidSelector(typed, Selector{val: selector})
err := IsValidSelector(typed, selector)
if err != nil {
return Selector{}, err
}
@@ -35,26 +36,26 @@ func NewSelector(typed Type, selector string) (Selector, error) {
return Selector{val: selector}, nil
}
func IsValidSelector(typed Type, selector Selector) error {
func IsValidSelector(typed Type, selector string) error {
switch typed {
case TypeUser:
if !typeUserSelectorRegex.MatchString(selector.String()) {
if !typeUserSelectorRegex.MatchString(selector) {
return errors.Newf(errors.TypeInvalidInput, ErrCodeAuthZInvalidSelectorRegex, "selector must conform to regex %s", typeUserSelectorRegex.String())
}
case TypeRole:
if !typeRoleSelectorRegex.MatchString(selector.String()) {
if !typeRoleSelectorRegex.MatchString(selector) {
return errors.Newf(errors.TypeInvalidInput, ErrCodeAuthZInvalidSelectorRegex, "selector must conform to regex %s", typeRoleSelectorRegex.String())
}
case TypeOrganization:
if !typeOrganizationSelectorRegex.MatchString(selector.String()) {
if !typeOrganizationSelectorRegex.MatchString(selector) {
return errors.Newf(errors.TypeInvalidInput, ErrCodeAuthZInvalidSelectorRegex, "selector must conform to regex %s", typeOrganizationSelectorRegex.String())
}
case TypeResource:
if !typeResourceSelectorRegex.MatchString(selector.String()) {
if !typeResourceSelectorRegex.MatchString(selector) {
return errors.Newf(errors.TypeInvalidInput, ErrCodeAuthZInvalidSelectorRegex, "selector must conform to regex %s", typeResourceSelectorRegex.String())
}
case TypeResources:
if !typeResourcesSelectorRegex.MatchString(selector.String()) {
if !typeResourcesSelectorRegex.MatchString(selector) {
return errors.Newf(errors.TypeInvalidInput, ErrCodeAuthZInvalidSelectorRegex, "selector must conform to regex %s", typeResourcesSelectorRegex.String())
}
}

View File

@@ -24,7 +24,7 @@ type Transaction struct {
}
func NewObject(resource Resource, selector Selector) (*Object, error) {
err := IsValidSelector(resource.Type, selector)
err := IsValidSelector(resource.Type, selector.val)
if err != nil {
return nil, err
}

View File

@@ -30,8 +30,8 @@ var (
type Typeable interface {
Type() Type
Name() Name
Prefix() string
Tuples(subject string, relation Relation, selector []Selector) ([]*openfgav1.TupleKey, error)
Prefix(orgId valuer.UUID) string
Tuples(subject string, relation Relation, selector []Selector, orgID valuer.UUID) ([]*openfgav1.TupleKey, error)
}
type Type struct{ valuer.String }

View File

@@ -3,6 +3,7 @@ package authtypes
import (
"strings"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
)
@@ -10,7 +11,7 @@ var _ Typeable = new(typeableOrganization)
type typeableOrganization struct{}
func (typeableOrganization *typeableOrganization) Tuples(subject string, relation Relation, selector []Selector) ([]*openfgav1.TupleKey, error) {
func (typeableOrganization *typeableOrganization) Tuples(subject string, relation Relation, selector []Selector, _ valuer.UUID) ([]*openfgav1.TupleKey, error) {
tuples := make([]*openfgav1.TupleKey, 0)
for _, selector := range selector {
object := strings.Join([]string{typeableOrganization.Type().StringValue(), selector.String()}, ":")
@@ -28,6 +29,6 @@ func (typeableOrganization *typeableOrganization) Name() Name {
return MustNewName("organization")
}
func (typeableOrganization *typeableOrganization) Prefix() string {
func (typeableOrganization *typeableOrganization) Prefix(_ valuer.UUID) string {
return typeableOrganization.Type().StringValue()
}

View File

@@ -1,8 +1,7 @@
package authtypes
import (
"strings"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
)
@@ -24,10 +23,10 @@ func MustNewTypeableResource(name Name) Typeable {
return typeableesource
}
func (typeableResource *typeableResource) Tuples(subject string, relation Relation, selector []Selector) ([]*openfgav1.TupleKey, error) {
func (typeableResource *typeableResource) Tuples(subject string, relation Relation, selector []Selector, orgID valuer.UUID) ([]*openfgav1.TupleKey, error) {
tuples := make([]*openfgav1.TupleKey, 0)
for _, selector := range selector {
object := typeableResource.Prefix() + "/" + selector.String()
object := typeableResource.Prefix(orgID) + "/" + selector.String()
tuples = append(tuples, &openfgav1.TupleKey{User: subject, Relation: relation.StringValue(), Object: object})
}
@@ -42,6 +41,7 @@ func (typeableResource *typeableResource) Name() Name {
return typeableResource.name
}
func (typeableResource *typeableResource) Prefix() string {
return strings.Join([]string{typeableResource.Type().StringValue(), typeableResource.Name().String()}, ":")
func (typeableResource *typeableResource) Prefix(orgID valuer.UUID) string {
// example: resource:organization/0199c47d-f61b-7833-bc5f-c0730f12f046/dashboard
return typeableResource.Type().StringValue() + ":" + "organization" + "/" + orgID.StringValue() + "/" + typeableResource.Name().String()
}

View File

@@ -1,8 +1,7 @@
package authtypes
import (
"strings"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
)
@@ -24,10 +23,10 @@ func MustNewTypeableResources(name Name) Typeable {
return resources
}
func (typeableResources *typeableResources) Tuples(subject string, relation Relation, selector []Selector) ([]*openfgav1.TupleKey, error) {
func (typeableResources *typeableResources) Tuples(subject string, relation Relation, selector []Selector, orgID valuer.UUID) ([]*openfgav1.TupleKey, error) {
tuples := make([]*openfgav1.TupleKey, 0)
for _, selector := range selector {
object := typeableResources.Prefix() + "/" + selector.String()
object := typeableResources.Prefix(orgID) + "/" + selector.String()
tuples = append(tuples, &openfgav1.TupleKey{User: subject, Relation: relation.StringValue(), Object: object})
}
@@ -42,6 +41,7 @@ func (typeableResources *typeableResources) Name() Name {
return typeableResources.name
}
func (typeableResources *typeableResources) Prefix() string {
return strings.Join([]string{typeableResources.Type().StringValue(), typeableResources.Name().String()}, ":")
func (typeableResources *typeableResources) Prefix(orgID valuer.UUID) string {
// example: resources:organization/0199c47d-f61b-7833-bc5f-c0730f12f046/dashboards
return typeableResources.Type().StringValue() + ":" + "organization" + "/" + orgID.StringValue() + "/" + typeableResources.Name().String()
}

View File

@@ -1,8 +1,7 @@
package authtypes
import (
"strings"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
)
@@ -10,10 +9,10 @@ var _ Typeable = new(typeableRole)
type typeableRole struct{}
func (typeableRole *typeableRole) Tuples(subject string, relation Relation, selector []Selector) ([]*openfgav1.TupleKey, error) {
func (typeableRole *typeableRole) Tuples(subject string, relation Relation, selector []Selector, orgID valuer.UUID) ([]*openfgav1.TupleKey, error) {
tuples := make([]*openfgav1.TupleKey, 0)
for _, selector := range selector {
object := strings.Join([]string{typeableRole.Type().StringValue(), selector.String()}, ":")
object := typeableRole.Prefix(orgID) + "/" + selector.String()
tuples = append(tuples, &openfgav1.TupleKey{User: subject, Relation: relation.StringValue(), Object: object})
}
@@ -28,6 +27,7 @@ func (typeableRole *typeableRole) Name() Name {
return MustNewName("role")
}
func (typeableRole *typeableRole) Prefix() string {
return typeableRole.Type().StringValue()
func (typeableRole *typeableRole) Prefix(orgID valuer.UUID) string {
// example: role:organization/0199c47d-f61b-7833-bc5f-c0730f12f046/role
return typeableRole.Type().StringValue() + ":" + "organization" + "/" + orgID.StringValue() + "/" + typeableRole.Name().String()
}

View File

@@ -1,8 +1,7 @@
package authtypes
import (
"strings"
"github.com/SigNoz/signoz/pkg/valuer"
openfgav1 "github.com/openfga/api/proto/openfga/v1"
)
@@ -10,10 +9,10 @@ var _ Typeable = new(typeableUser)
type typeableUser struct{}
func (typeableUser *typeableUser) Tuples(subject string, relation Relation, selector []Selector) ([]*openfgav1.TupleKey, error) {
func (typeableUser *typeableUser) Tuples(subject string, relation Relation, selector []Selector, orgID valuer.UUID) ([]*openfgav1.TupleKey, error) {
tuples := make([]*openfgav1.TupleKey, 0)
for _, selector := range selector {
object := strings.Join([]string{typeableUser.Type().StringValue(), selector.String()}, ":")
object := typeableUser.Prefix(orgID) + "/" + selector.String()
tuples = append(tuples, &openfgav1.TupleKey{User: subject, Relation: relation.StringValue(), Object: object})
}
@@ -28,6 +27,7 @@ func (typeableUser *typeableUser) Name() Name {
return MustNewName("user")
}
func (typeableUser *typeableUser) Prefix() string {
return typeableUser.Type().StringValue()
func (typeableUser *typeableUser) Prefix(orgID valuer.UUID) string {
// example: user:organization/0199c47d-f61b-7833-bc5f-c0730f12f046/user
return typeableUser.Type().StringValue() + ":" + "organization" + "/" + orgID.StringValue() + "/" + typeableUser.Name().String()
}

View File

@@ -13,8 +13,8 @@ import (
)
var (
ResourceDashboard = authtypes.MustNewTypeableResource(authtypes.MustNewName("dashboard"))
ResourcesDashboards = authtypes.MustNewTypeableResources(authtypes.MustNewName("dashboards"))
TypeableResourceDashboard = authtypes.MustNewTypeableResource(authtypes.MustNewName("dashboard"))
TypeableResourcesDashboards = authtypes.MustNewTypeableResources(authtypes.MustNewName("dashboards"))
)
type StorableDashboard struct {

View File

@@ -137,6 +137,14 @@ func (f FilterOperator) IsNegativeOperator() bool {
return true
}
// IsComparisonOperator reports whether the operator is one of the four
// ordering comparisons: >, >=, <, <=.
func (f FilterOperator) IsComparisonOperator() bool {
	switch f {
	case FilterOperatorGreaterThan,
		FilterOperatorGreaterThanOrEq,
		FilterOperatorLessThan,
		FilterOperatorLessThanOrEq:
		return true
	default:
		return false
	}
}
type OrderDirection struct {
valuer.String
}

View File

@@ -134,6 +134,11 @@ func (q *QueryBuilderQuery[T]) Validate(requestType RequestType) error {
return err
}
// Validate GroupBy
if err := q.validateGroupByFields(); err != nil {
return err
}
if requestType != RequestTypeRaw && requestType != RequestTypeTrace && len(q.Aggregations) > 0 {
if err := q.validateOrderByForAggregation(); err != nil {
return err
@@ -168,6 +173,27 @@ func (q *QueryBuilderQuery[T]) validateSelectFields() error {
"isRoot and isEntryPoint fields are not supported in selectFields",
)
}
// for logs the selectFields is not present.
// in traces, timestamp is added by default, so it will conflict with timestamp attribute.
if v.Name == "timestamp" {
return errors.NewInvalidInputf(
errors.CodeInvalidInput,
"timestamp field is not supported in selectFields, it's added by default where needed",
)
}
}
return nil
}
// validateGroupByFields rejects a user-supplied "timestamp" key in GroupBy:
// per the error message below, timestamp is added by the query builder itself
// where needed, so accepting it here would produce a conflicting column.
func (q *QueryBuilderQuery[T]) validateGroupByFields() error {
	for _, v := range q.GroupBy {
		if v.Name == "timestamp" {
			return errors.NewInvalidInputf(
				errors.CodeInvalidInput,
				"timestamp field is not supported in groupBy, it's added by default where needed",
			)
		}
	}
	return nil
}

View File

@@ -21,6 +21,10 @@ var (
ErrCodeRoleFailedTransactionsFromString = errors.MustNewCode("role_failed_transactions_from_string")
)
var (
TypeableResourcesRoles = authtypes.MustNewTypeableResources(authtypes.MustNewName("roles"))
)
type StorableRole struct {
bun.BaseModel `bun:"table:role"`
@@ -166,7 +170,7 @@ func (role *PatchableRole) UnmarshalJSON(data []byte) error {
return nil
}
func GetAdditionTuples(id valuer.UUID, relation authtypes.Relation, additions []*authtypes.Object) ([]*openfgav1.TupleKey, error) {
func GetAdditionTuples(id valuer.UUID, orgID valuer.UUID, relation authtypes.Relation, additions []*authtypes.Object) ([]*openfgav1.TupleKey, error) {
tuples := make([]*openfgav1.TupleKey, 0)
for _, object := range additions {
@@ -179,6 +183,7 @@ func GetAdditionTuples(id valuer.UUID, relation authtypes.Relation, additions []
),
relation,
[]authtypes.Selector{object.Selector},
orgID,
)
if err != nil {
return nil, err
@@ -190,8 +195,8 @@ func GetAdditionTuples(id valuer.UUID, relation authtypes.Relation, additions []
return tuples, nil
}
func GetDeletionTuples(id valuer.UUID, relation authtypes.Relation, deletions []*authtypes.Object) ([]*openfgav1.TupleKeyWithoutCondition, error) {
tuples := make([]*openfgav1.TupleKeyWithoutCondition, 0)
func GetDeletionTuples(id valuer.UUID, orgID valuer.UUID, relation authtypes.Relation, deletions []*authtypes.Object) ([]*openfgav1.TupleKey, error) {
tuples := make([]*openfgav1.TupleKey, 0)
for _, object := range deletions {
typeable := authtypes.MustNewTypeableFromType(object.Resource.Type, object.Resource.Name)
@@ -203,21 +208,13 @@ func GetDeletionTuples(id valuer.UUID, relation authtypes.Relation, deletions []
),
relation,
[]authtypes.Selector{object.Selector},
orgID,
)
if err != nil {
return nil, err
}
deletionTuples := make([]*openfgav1.TupleKeyWithoutCondition, len(transactionTuples))
for idx, tuple := range transactionTuples {
deletionTuples[idx] = &openfgav1.TupleKeyWithoutCondition{
User: tuple.User,
Relation: tuple.Relation,
Object: tuple.Object,
}
}
tuples = append(tuples, deletionTuples...)
tuples = append(tuples, transactionTuples...)
}
return tuples, nil

View File

@@ -207,6 +207,7 @@ func (r *PostableRule) processRuleDefaults() error {
q.Expression = qLabel
}
}
// added alerts v2 fields
if r.SchemaVersion == DefaultSchemaVersion {
thresholdName := CriticalThresholdName
@@ -215,12 +216,20 @@ func (r *PostableRule) processRuleDefaults() error {
thresholdName = severity
}
}
// For anomaly detection with ValueIsBelow, negate the target
targetValue := r.RuleCondition.Target
if r.RuleType == RuleTypeAnomaly && r.RuleCondition.CompareOp == ValueIsBelow && targetValue != nil {
negated := -1 * *targetValue
targetValue = &negated
}
thresholdData := RuleThresholdData{
Kind: BasicThresholdKind,
Spec: BasicRuleThresholds{{
Name: thresholdName,
TargetUnit: r.RuleCondition.TargetUnit,
TargetValue: r.RuleCondition.Target,
TargetValue: targetValue,
MatchType: r.RuleCondition.MatchType,
CompareOp: r.RuleCondition.CompareOp,
Channels: r.PreferredChannels,

View File

@@ -718,3 +718,353 @@ func TestParseIntoRuleMultipleThresholds(t *testing.T) {
assert.Equal(t, 1, len(vector))
}
// TestAnomalyNegationShouldAlert verifies threshold evaluation for rules
// built from PostableRule JSON. For anomaly rules ("anomaly_rule") with
// CompareOp ValueIsBelow (op "2"), processRuleDefaults negates the target
// before evaluation; plain threshold rules keep the target as-is. Each case
// unmarshals a rule, resolves its RuleThreshold, runs ShouldAlert over a
// series, and checks whether an alert fires and which sample value it
// carries.
func TestAnomalyNegationShouldAlert(t *testing.T) {
	tests := []struct {
		name          string    // test case label
		ruleJSON      []byte    // raw PostableRule payload
		series        v3.Series // input series fed to ShouldAlert
		shouldAlert   bool      // whether an alert sample is expected
		expectedValue float64   // only checked when shouldAlert is true
	}{
		{
			name: "anomaly rule with ValueIsBelow - should alert",
			ruleJSON: []byte(`{
				"alert": "AnomalyBelowTest",
				"ruleType": "anomaly_rule",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",
						"queries": [{
							"type": "builder_query",
							"spec": {
								"name": "A",
								"signal": "metrics",
								"aggregations": [{"metricName": "test", "spaceAggregation": "p50"}],
								"stepInterval": "5m"
							}
						}]
					},
					"target": 2.0,
					"matchType": "1",
					"op": "2",
					"selectedQuery": "A"
				}
			}`),
			series: v3.Series{
				Labels: map[string]string{"host": "server1"},
				Points: []v3.Point{
					{Timestamp: 1000, Value: -2.1}, // below & at least once, should alert
					{Timestamp: 2000, Value: -2.3},
				},
			},
			shouldAlert:   true,
			expectedValue: -2.1,
		},
		{
			name: "anomaly rule with ValueIsBelow; should not alert",
			ruleJSON: []byte(`{
				"alert": "AnomalyBelowTest",
				"ruleType": "anomaly_rule",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",
						"queries": [{
							"type": "builder_query",
							"spec": {
								"name": "A",
								"signal": "metrics",
								"aggregations": [{"metricName": "test", "spaceAggregation": "p50"}],
								"stepInterval": "5m"
							}
						}]
					},
					"target": 2.0,
					"matchType": "1",
					"op": "2",
					"selectedQuery": "A"
				}
			}`), // below & at least once, no value below -2.0
			series: v3.Series{
				Labels: map[string]string{"host": "server1"},
				Points: []v3.Point{
					{Timestamp: 1000, Value: -1.9},
					{Timestamp: 2000, Value: -1.8},
				},
			},
			shouldAlert: false,
		},
		{
			name: "anomaly rule with ValueIsAbove; should alert",
			ruleJSON: []byte(`{
				"alert": "AnomalyAboveTest",
				"ruleType": "anomaly_rule",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",
						"queries": [{
							"type": "builder_query",
							"spec": {
								"name": "A",
								"signal": "metrics",
								"aggregations": [{"metricName": "test", "spaceAggregation": "p50"}],
								"stepInterval": "5m"
							}
						}]
					},
					"target": 2.0,
					"matchType": "1",
					"op": "1",
					"selectedQuery": "A"
				}
			}`), // above & at least once, should alert
			series: v3.Series{
				Labels: map[string]string{"host": "server1"},
				Points: []v3.Point{
					{Timestamp: 1000, Value: 2.1}, // above 2.0, should alert
					{Timestamp: 2000, Value: 2.2},
				},
			},
			shouldAlert:   true,
			expectedValue: 2.1,
		},
		{
			name: "anomaly rule with ValueIsAbove; should not alert",
			ruleJSON: []byte(`{
				"alert": "AnomalyAboveTest",
				"ruleType": "anomaly_rule",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",
						"queries": [{
							"type": "builder_query",
							"spec": {
								"name": "A",
								"signal": "metrics",
								"aggregations": [{"metricName": "test", "spaceAggregation": "p50"}],
								"stepInterval": "5m"
							}
						}]
					},
					"target": 2.0,
					"matchType": "1",
					"op": "1",
					"selectedQuery": "A"
				}
			}`),
			series: v3.Series{
				Labels: map[string]string{"host": "server1"},
				Points: []v3.Point{
					{Timestamp: 1000, Value: 1.1},
					{Timestamp: 2000, Value: 1.2},
				},
			},
			shouldAlert: false,
		},
		{
			name: "anomaly rule with ValueIsBelow and AllTheTimes; should alert",
			ruleJSON: []byte(`{
				"alert": "AnomalyBelowAllTest",
				"ruleType": "anomaly_rule",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",
						"queries": [{
							"type": "builder_query",
							"spec": {
								"name": "A",
								"signal": "metrics",
								"aggregations": [{"metricName": "test", "spaceAggregation": "p50"}],
								"stepInterval": "5m"
							}
						}]
					},
					"target": 2.0,
					"matchType": "2",
					"op": "2",
					"selectedQuery": "A"
				}
			}`), // below and all the times
			series: v3.Series{
				Labels: map[string]string{"host": "server1"},
				Points: []v3.Point{
					{Timestamp: 1000, Value: -2.1}, // all below -2
					{Timestamp: 2000, Value: -2.2},
					{Timestamp: 3000, Value: -2.5},
				},
			},
			shouldAlert:   true,
			expectedValue: -2.1, // max value when all are below threshold
		},
		{
			name: "anomaly rule with ValueIsBelow and AllTheTimes; should not alert",
			ruleJSON: []byte(`{
				"alert": "AnomalyBelowAllTest",
				"ruleType": "anomaly_rule",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",
						"queries": [{
							"type": "builder_query",
							"spec": {
								"name": "A",
								"signal": "metrics",
								"aggregations": [{"metricName": "test", "spaceAggregation": "p50"}],
								"stepInterval": "5m"
							}
						}]
					},
					"target": 2.0,
					"matchType": "2",
					"op": "2",
					"selectedQuery": "A"
				}
			}`),
			series: v3.Series{
				Labels: map[string]string{"host": "server1"},
				Points: []v3.Point{
					{Timestamp: 1000, Value: -3.0},
					{Timestamp: 2000, Value: -1.0}, // above -2, breaks condition
					{Timestamp: 3000, Value: -2.5},
				},
			},
			shouldAlert: false,
		},
		{
			name: "anomaly rule with ValueOutsideBounds; should alert",
			ruleJSON: []byte(`{
				"alert": "AnomalyOutOfBoundsTest",
				"ruleType": "anomaly_rule",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",
						"queries": [{
							"type": "builder_query",
							"spec": {
								"name": "A",
								"signal": "metrics",
								"aggregations": [{"metricName": "test", "spaceAggregation": "p50"}],
								"stepInterval": "5m"
							}
						}]
					},
					"target": 7.0,
					"matchType": "1",
					"op": "7",
					"selectedQuery": "A"
				}
			}`),
			series: v3.Series{
				Labels: map[string]string{"host": "server1"},
				Points: []v3.Point{
					{Timestamp: 1000, Value: -8.0}, // abs(8) >= 7, alert
					{Timestamp: 2000, Value: 5.0},
				},
			},
			shouldAlert:   true,
			expectedValue: -8.0,
		},
		{
			name: "non-anomaly threshold rule with ValueIsBelow; should alert",
			ruleJSON: []byte(`{
				"alert": "ThresholdTest",
				"ruleType": "threshold_rule",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",
						"queries": [{
							"type": "builder_query",
							"spec": {
								"name": "A",
								"signal": "metrics",
								"aggregations": [{"metricName": "test", "spaceAggregation": "p50"}],
								"stepInterval": "5m"
							}
						}]
					},
					"target": 90.0,
					"matchType": "1",
					"op": "2",
					"selectedQuery": "A"
				}
			}`),
			series: v3.Series{
				Labels: map[string]string{"host": "server1"},
				Points: []v3.Point{
					{Timestamp: 1000, Value: 80.0}, // below 90, should alert
					{Timestamp: 2000, Value: 85.0},
				},
			},
			shouldAlert:   true,
			expectedValue: 80.0,
		},
		{
			name: "non-anomaly rule with ValueIsBelow - should not alert",
			ruleJSON: []byte(`{
				"alert": "ThresholdTest",
				"ruleType": "threshold_rule",
				"condition": {
					"compositeQuery": {
						"queryType": "builder",
						"queries": [{
							"type": "builder_query",
							"spec": {
								"name": "A",
								"signal": "metrics",
								"aggregations": [{"metricName": "test", "spaceAggregation": "p50"}],
								"stepInterval": "5m"
							}
						}]
					},
					"target": 50.0,
					"matchType": "1",
					"op": "2",
					"selectedQuery": "A"
				}
			}`),
			series: v3.Series{
				Labels: map[string]string{"host": "server1"},
				Points: []v3.Point{
					{Timestamp: 1000, Value: 60.0}, // 60 is not below target 50, so no alert
					{Timestamp: 2000, Value: 90.0},
				},
			},
			shouldAlert: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			rule := PostableRule{}
			err := json.Unmarshal(tt.ruleJSON, &rule)
			if err != nil {
				t.Fatalf("Failed to unmarshal rule: %v", err)
			}
			ruleThreshold, err := rule.RuleCondition.Thresholds.GetRuleThreshold()
			if err != nil {
				t.Fatalf("unexpected error from GetRuleThreshold: %v", err)
			}
			resultVector, err := ruleThreshold.ShouldAlert(tt.series, "")
			if err != nil {
				t.Fatalf("unexpected error from ShouldAlert: %v", err)
			}
			// A non-empty result vector means the rule fired.
			shouldAlert := len(resultVector) > 0
			if shouldAlert != tt.shouldAlert {
				t.Errorf("Expected shouldAlert=%v, got %v. %s",
					tt.shouldAlert, shouldAlert, tt.name)
			}
			// When an alert is expected, the first sample must carry the
			// expected point value.
			if tt.shouldAlert && len(resultVector) > 0 {
				sample := resultVector[0]
				if sample.V != tt.expectedValue {
					t.Errorf("Expected alert value=%.2f, got %.2f. %s",
						tt.expectedValue, sample.V, tt.name)
				}
			}
		})
	}
}

View File

@@ -136,97 +136,3 @@ type FieldValueSelector struct {
Value string `json:"value"`
Limit int `json:"limit"`
}
// DataTypeCollisionHandledFieldName reconciles the declared data type of a
// telemetry key with the runtime type of the filter value: it returns the
// (possibly cast) column expression and the (possibly normalized) value to
// use in the final query.
func DataTypeCollisionHandledFieldName(key *TelemetryFieldKey, value any, tblFieldName string) (string, any) {
	// This block of code exists to handle the data type collisions
	// We don't want to fail the requests when there is a key with more than one data type
	// Let's take an example of `http.status_code`, and consider user sent a string value and number value
	// When they search for `http.status_code=200`, we will search across both the number columns and string columns
	// and return the results from both the columns
	// While we expect user not to send the mixed data types, it inevitably happens
	// So we handle the data type collisions here
	switch key.FieldDataType {
	case FieldDataTypeString:
		switch v := value.(type) {
		case float64:
			// try to convert the string column to a number
			tblFieldName = castFloat(tblFieldName)
		case []any:
			if allFloats(v) {
				tblFieldName = castFloat(tblFieldName)
			} else if hasString(v) {
				// the castString result is discarded here: the column stays
				// unchanged and only the values are stringified
				_, value = castString(tblFieldName), toStrings(v)
			}
		case bool:
			// we don't have a toBoolOrNull in ClickHouse, so we need to convert the bool to a string
			value = fmt.Sprintf("%t", v)
		}
	case FieldDataTypeFloat64, FieldDataTypeInt64, FieldDataTypeNumber:
		switch v := value.(type) {
		// why? ; CH returns an error for a simple check
		// attributes_number['http.status_code'] = 200 but not for attributes_number['http.status_code'] >= 200
		// DB::Exception: Bad get: has UInt64, requested Float64.
		// How is it working in v4? v4 prepares the full query with values in query string
		// When we format the float it becomes attributes_number['http.status_code'] = 200.000
		// Which CH gladly accepts and doesn't throw error
		// However, when passed as query args, the default formatter
		// https://github.com/ClickHouse/clickhouse-go/blob/757e102f6d8c6059d564ce98795b4ce2a101b1a5/bind.go#L393
		// is used which prepares the
		// final query as attributes_number['http.status_code'] = 200 giving this error
		// This following is one way to workaround it
		case float32, float64:
			tblFieldName = castFloatHack(tblFieldName)
		case string:
			// try to convert the number attribute to string
			tblFieldName = castString(tblFieldName) // numeric col vs string literal
		case []any:
			if allFloats(v) {
				tblFieldName = castFloatHack(tblFieldName)
			} else if hasString(v) {
				tblFieldName, value = castString(tblFieldName), toStrings(v)
			}
		}
	case FieldDataTypeBool:
		switch v := value.(type) {
		case string:
			// string literal against a bool column: compare both as strings
			tblFieldName = castString(tblFieldName)
		case []any:
			if hasString(v) {
				tblFieldName, value = castString(tblFieldName), toStrings(v)
			}
		}
	}
	return tblFieldName, value
}
// castFloat wraps a column expression in ClickHouse's nullable float cast,
// e.g. "col" -> "toFloat64OrNull(col)".
func castFloat(col string) string { return "toFloat64OrNull(" + col + ")" }

// castFloatHack wraps a column expression in a strict float cast,
// e.g. "col" -> "toFloat64(col)".
func castFloatHack(col string) string { return "toFloat64(" + col + ")" }

// castString wraps a column expression in a string cast,
// e.g. "col" -> "toString(col)".
func castString(col string) string { return "toString(" + col + ")" }
// allFloats reports whether every element of in is a float64.
// An empty slice vacuously satisfies the condition.
func allFloats(in []any) bool {
	for _, elem := range in {
		switch elem.(type) {
		case float64:
			// keep scanning
		default:
			return false
		}
	}
	return true
}
// hasString reports whether at least one element of in is a string.
func hasString(in []any) bool {
	for _, elem := range in {
		switch elem.(type) {
		case string:
			return true
		}
	}
	return false
}
// toStrings renders every element of in with fmt's default "%v" formatting
// and returns the results as a new []any of strings.
func toStrings(in []any) []any {
	converted := make([]any, 0, len(in))
	for _, elem := range in {
		converted = append(converted, fmt.Sprint(elem))
	}
	return converted
}

View File

@@ -2,8 +2,6 @@ package telemetrytypes
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestGetFieldKeyFromKeyText(t *testing.T) {
@@ -93,115 +91,3 @@ func TestGetFieldKeyFromKeyText(t *testing.T) {
}
}
}
// TestDataTypeCollisionHandledFieldName covers the collision handling for
// keys whose declared data type disagrees with the type of the filter value:
// the column expression is wrapped in a cast and/or the value is normalized,
// per DataTypeCollisionHandledFieldName.
func TestDataTypeCollisionHandledFieldName(t *testing.T) {
	tests := []struct {
		name              string
		key               *TelemetryFieldKey
		value             any
		tblFieldName      string
		expectedFieldName string
		expectedValue     any
	}{
		{
			// numeric value against a string column: cast the column to float
			name: "http_status_code_string_field_with_numeric_value",
			key: &TelemetryFieldKey{
				Name:          "http.status_code",
				FieldDataType: FieldDataTypeString,
			},
			value:             float64(200),
			tblFieldName:      "attribute_string_http$$status_code",
			expectedFieldName: "toFloat64OrNull(attribute_string_http$$status_code)",
			expectedValue:     float64(200),
		},
		{
			// bool value against a string column: value is stringified,
			// column stays unchanged
			name: "service_enabled_string_field_with_bool_value",
			key: &TelemetryFieldKey{
				Name:          "service.enabled",
				FieldDataType: FieldDataTypeString,
			},
			value:             true,
			tblFieldName:      "attribute_string_service$$enabled",
			expectedFieldName: "attribute_string_service$$enabled",
			expectedValue:     "true",
		},
		{
			// matching types: nothing changes
			name: "http_method_string_field_with_string_value",
			key: &TelemetryFieldKey{
				Name:          "http.method",
				FieldDataType: FieldDataTypeString,
			},
			value:             "GET",
			tblFieldName:      "attribute_string_http$$method",
			expectedFieldName: "attribute_string_http$$method",
			expectedValue:     "GET",
		},
		{
			// all-float array against a string column: cast the column
			name: "response_times_string_field_with_numeric_array",
			key: &TelemetryFieldKey{
				Name:          "response.times",
				FieldDataType: FieldDataTypeString,
			},
			value:             []any{float64(100.5), float64(200.3), float64(150.7)},
			tblFieldName:      "attribute_string_response$$times",
			expectedFieldName: "toFloat64OrNull(attribute_string_response$$times)",
			expectedValue:     []any{float64(100.5), float64(200.3), float64(150.7)},
		},
		{
			// mixed array against a string column: values are stringified,
			// column stays unchanged
			name: "error_codes_string_field_with_mixed_array",
			key: &TelemetryFieldKey{
				Name:          "error.codes",
				FieldDataType: FieldDataTypeString,
			},
			value:             []any{float64(500), "TIMEOUT", float64(503)},
			tblFieldName:      "attribute_string_error$$codes",
			expectedFieldName: "attribute_string_error$$codes",
			expectedValue:     []any{"500", "TIMEOUT", "503"},
		},
		// numbers
		{
			// string value against a float column: cast the column to string
			name: "http_request_duration_float_field_with_string_value",
			key: &TelemetryFieldKey{
				Name:          "http.request.duration",
				FieldDataType: FieldDataTypeFloat64,
			},
			value:             "1234.56",
			tblFieldName:      "attribute_float64_http$$request$$duration",
			expectedFieldName: "toString(attribute_float64_http$$request$$duration)",
			expectedValue:     "1234.56",
		},
		// bools
		{
			// string value against a bool column: cast the column to string
			name: "feature_enabled_bool_field_with_string_value",
			key: &TelemetryFieldKey{
				Name:          "feature.enabled",
				FieldDataType: FieldDataTypeBool,
			},
			value:             "true",
			tblFieldName:      "attribute_bool_feature$$enabled",
			expectedFieldName: "toString(attribute_bool_feature$$enabled)",
			expectedValue:     "true",
		},
		{
			// mixed array against a bool column: cast the column and
			// stringify all values
			name: "feature_flags_bool_field_with_mixed_array",
			key: &TelemetryFieldKey{
				Name:          "feature.flags",
				FieldDataType: FieldDataTypeBool,
			},
			value:             []any{true, "enabled", false},
			tblFieldName:      "attribute_bool_feature$$flags",
			expectedFieldName: "toString(attribute_bool_feature$$flags)",
			expectedValue:     []any{"true", "enabled", "false"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			resultFieldName, resultValue := DataTypeCollisionHandledFieldName(tt.key, tt.value, tt.tblFieldName)
			assert.Equal(t, tt.expectedFieldName, resultFieldName)
			assert.Equal(t, tt.expectedValue, resultValue)
		})
	}
}

View File

@@ -816,3 +816,429 @@ def test_logs_time_series_count(
),
"value": 9,
} in series[1]["values"]
def test_datatype_collision(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_jwt_token: Callable[[str, str], str],
    insert_logs: Callable[[List[Logs]], None],
) -> None:
    """
    Setup:
        Insert logs with data type collision scenarios to exercise
        DataTypeCollisionHandledFieldName: numeric-looking values stored in
        string attributes, numeric values stored where strings were seen
        before, plus an empty-string / zero-value edge case.
    Tests:
        1. severity_number (numeric field) compared with string literals
           ('7', '7.0', '13') — comparison must be numeric, not lexicographic
        2. http.status_code queried with a numeric literal (200) against
           logs that stored the value as the string "200"
        3. http.status_code queried with a string literal ('404') against
           logs that stored the value as the number 404
        4. Edge case: empty-string comparison on the mixed-type attribute
    """
    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
    logs: List[Logs] = []

    # Batch 1: numeric-looking attribute values stored as strings.
    severity_levels = ["DEBUG", "INFO", "WARN"]
    for i in range(3):
        logs.append(
            Logs(
                timestamp=now - timedelta(microseconds=i + 1),
                resources={
                    "deployment.environment": "production",
                    "service.name": "java",
                    "os.type": "linux",
                    "host.name": f"linux-00{i%2}",
                    "cloud.provider": "integration",
                    "cloud.account.id": f"00{i%2}",
                },
                attributes={
                    "log.iostream": "stdout",
                    "logtag": "F",
                    "code.file": "/opt/Integration.java",
                    "code.function": "com.example.Integration.process",
                    "code.line": i + 1,
                    "telemetry.sdk.language": "java",
                    "http.status_code": "200",  # String value
                    "response.time": "123.45",  # String value
                },
                body=f"Test log {i+1} with string values",
                severity_text=severity_levels[i],  # DEBUG(5-8), INFO(9-12), WARN(13-16)
            )
        )

    # Batch 2: numeric values stored in the same attribute keys.
    severity_levels_2 = ["ERROR", "FATAL", "TRACE", "DEBUG"]
    for i in range(4):
        logs.append(
            Logs(
                timestamp=now - timedelta(microseconds=i + 10),
                resources={
                    "deployment.environment": "production",
                    "service.name": "go",
                    "os.type": "linux",
                    "host.name": f"linux-00{i%2}",
                    "cloud.provider": "integration",
                    "cloud.account.id": f"00{i%2}",
                },
                attributes={
                    "log.iostream": "stdout",
                    "logtag": "F",
                    "code.file": "/opt/integration.go",
                    "code.function": "com.example.Integration.process",
                    "code.line": i + 1,
                    "telemetry.sdk.language": "go",
                    "http.status_code": 404,  # Numeric value
                    "response.time": 456.78,  # Numeric value
                },
                body=f"Test log {i+4} with numeric values",
                severity_text=severity_levels_2[i],  # ERROR(17-20), FATAL(21-24), TRACE(1-4), DEBUG(5-8)
            )
        )

    # Edge case: empty string and zero value in the collision attributes.
    logs.append(
        Logs(
            timestamp=now - timedelta(microseconds=20),
            resources={
                "deployment.environment": "production",
                "service.name": "python",
                "os.type": "linux",
                "host.name": "linux-002",
                "cloud.provider": "integration",
                "cloud.account.id": "002",
            },
            attributes={
                "log.iostream": "stdout",
                "logtag": "F",
                "code.file": "/opt/integration.py",
                "code.function": "com.example.Integration.process",
                "code.line": 1,
                "telemetry.sdk.language": "python",
                "http.status_code": "",  # Empty string
                "response.time": 0,  # Zero value
            },
            body="Edge case test log",
            severity_text="ERROR",
        )
    )
    insert_logs(logs)

    token = get_jwt_token(email=USER_ADMIN_EMAIL, password=USER_ADMIN_PASSWORD)

    def _count_for_filter(expression: str) -> int:
        """POST a v5 scalar count() query over the last 5 minutes with the
        given filter expression and return the resulting count."""
        response = requests.post(
            signoz.self.host_configs["8080"].get("/api/v5/query_range"),
            timeout=2,
            headers={
                "authorization": f"Bearer {token}",
            },
            json={
                "schemaVersion": "v1",
                "start": int(
                    (
                        datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
                        - timedelta(minutes=5)
                    ).timestamp()
                    * 1000
                ),
                "end": int(
                    datetime.now(tz=timezone.utc)
                    .replace(second=0, microsecond=0)
                    .timestamp()
                    * 1000
                ),
                "requestType": "scalar",
                "compositeQuery": {
                    "queries": [
                        {
                            "type": "builder_query",
                            "spec": {
                                "name": "A",
                                "signal": "logs",
                                "stepInterval": 60,
                                "disabled": False,
                                "filter": {"expression": expression},
                                "having": {"expression": ""},
                                "aggregations": [{"expression": "count()"}],
                            },
                        }
                    ]
                },
                "formatOptions": {"formatTableResultForUI": True, "fillGaps": False},
            },
        )
        assert response.status_code == HTTPStatus.OK
        assert response.json()["status"] == "success"
        results = response.json()["data"]["data"]["results"]
        assert len(results) == 1
        return results[0]["data"][0][0]

    # Test 1: string literal compared against the numeric severity_number
    # field must compare numerically: INFO(9), WARN(13), ERROR(17), FATAL(21)
    # and the edge-case ERROR(17) exceed 7 -> 5 logs.
    assert _count_for_filter("severity_number > '7'") == 5

    # Same comparison with a float-looking string literal.
    assert _count_for_filter("severity_number > '7.0'") == 5

    # Test 2: equality with a string literal on the numeric field.
    # WARN severity maps to the 13-16 range; exactly 1 log has severity_number = 13.
    assert _count_for_filter("severity_number = '13'") == 1

    # Test 3: numeric literal against an attribute stored as a string —
    # the first 3 logs carry http.status_code = "200".
    assert _count_for_filter("http.status_code = 200") == 3

    # Test 4: string literal against an attribute stored as a number —
    # the next 4 logs carry http.status_code = 404.
    assert _count_for_filter("http.status_code = '404'") == 4

    # Test 5: empty-string comparison — only the edge-case log matches.
    assert _count_for_filter("http.status_code = ''") == 1