Compare commits

..

3 Commits

Author | SHA1 | Message | Date
Tushar Vats | e1866b4bf4 | feat: added integration tests | 2026-02-10 04:33:59 +05:30
Tushar Vats | 127cafa4d9 | feat: enable searching span events | 2026-02-10 01:05:13 +05:30
Ashwin Bhatkal | df49484bea | fix: fix flaky dashboard test (#10254) | 2026-02-09 14:43:48 +00:00
15 changed files with 363 additions and 473 deletions

View File

@@ -1,62 +0,0 @@
import { isInvalidPlotValue, normalizePlotValue } from '../dataUtils';
describe('dataUtils', () => {
describe('isInvalidPlotValue', () => {
it('treats null and undefined as invalid', () => {
expect(isInvalidPlotValue(null)).toBe(true);
expect(isInvalidPlotValue(undefined)).toBe(true);
});
it('treats finite numbers as valid and non-finite as invalid', () => {
expect(isInvalidPlotValue(0)).toBe(false);
expect(isInvalidPlotValue(123.45)).toBe(false);
expect(isInvalidPlotValue(Number.NaN)).toBe(true);
expect(isInvalidPlotValue(Infinity)).toBe(true);
expect(isInvalidPlotValue(-Infinity)).toBe(true);
});
it('treats well-formed numeric strings as valid', () => {
expect(isInvalidPlotValue('0')).toBe(false);
expect(isInvalidPlotValue('123.45')).toBe(false);
expect(isInvalidPlotValue('-1')).toBe(false);
});
it('treats Infinity/NaN string variants and non-numeric strings as invalid', () => {
expect(isInvalidPlotValue('+Inf')).toBe(true);
expect(isInvalidPlotValue('-Inf')).toBe(true);
expect(isInvalidPlotValue('Infinity')).toBe(true);
expect(isInvalidPlotValue('-Infinity')).toBe(true);
expect(isInvalidPlotValue('NaN')).toBe(true);
expect(isInvalidPlotValue('not-a-number')).toBe(true);
});
it('treats non-number, non-string values as valid (left to caller)', () => {
expect(isInvalidPlotValue({})).toBe(false);
expect(isInvalidPlotValue([])).toBe(false);
expect(isInvalidPlotValue(true)).toBe(false);
});
});
describe('normalizePlotValue', () => {
it('returns null for invalid values detected by isInvalidPlotValue', () => {
expect(normalizePlotValue(null)).toBeNull();
expect(normalizePlotValue(undefined)).toBeNull();
expect(normalizePlotValue(NaN)).toBeNull();
expect(normalizePlotValue(Infinity)).toBeNull();
expect(normalizePlotValue('-Infinity')).toBeNull();
expect(normalizePlotValue('not-a-number')).toBeNull();
});
it('parses valid numeric strings into numbers', () => {
expect(normalizePlotValue('0')).toBe(0);
expect(normalizePlotValue('123.45')).toBe(123.45);
expect(normalizePlotValue('-1')).toBe(-1);
});
it('passes through valid numbers unchanged', () => {
expect(normalizePlotValue(0)).toBe(0);
expect(normalizePlotValue(123)).toBe(123);
expect(normalizePlotValue(42.5)).toBe(42.5);
});
});
});

View File

@@ -1,201 +0,0 @@
import uPlot from 'uplot';
import { DistributionType } from '../../config/types';
import * as scaleUtils from '../scale';
describe('scale utils', () => {
describe('normalizeLogScaleLimits', () => {
it('returns limits unchanged when distribution is not logarithmic', () => {
const limits = {
min: 1,
max: 100,
softMin: 5,
softMax: 50,
};
const result = scaleUtils.normalizeLogScaleLimits({
distr: DistributionType.Linear,
logBase: 10,
limits,
});
expect(result).toEqual(limits);
});
it('snaps positive limits to powers of the log base when distribution is logarithmic', () => {
const result = scaleUtils.normalizeLogScaleLimits({
distr: DistributionType.Logarithmic,
logBase: 10,
limits: {
min: 3,
max: 900,
softMin: 12,
softMax: 85,
},
});
expect(result.min).toBe(1); // 10^0
expect(result.max).toBe(1000); // 10^3
expect(result.softMin).toBe(10); // 10^1
expect(result.softMax).toBe(100); // 10^2
});
});
describe('getDistributionConfig', () => {
it('returns empty config for time scales', () => {
const config = scaleUtils.getDistributionConfig({
time: true,
distr: DistributionType.Linear,
logBase: 2,
});
expect(config).toEqual({});
});
it('returns linear distribution settings for non-time scales', () => {
const config = scaleUtils.getDistributionConfig({
time: false,
distr: DistributionType.Linear,
logBase: 2,
});
expect(config.distr).toBe(1);
expect(config.log).toBe(2);
});
it('returns log distribution settings for non-time scales', () => {
const config = scaleUtils.getDistributionConfig({
time: false,
distr: DistributionType.Logarithmic,
logBase: 10,
});
expect(config.distr).toBe(3);
expect(config.log).toBe(10);
});
});
describe('getRangeConfig', () => {
it('computes range config and fixed range flags correctly', () => {
const {
rangeConfig,
hardMinOnly,
hardMaxOnly,
hasFixedRange,
} = scaleUtils.getRangeConfig(0, 100, null, null, 0.1, 0.2);
expect(rangeConfig.min).toEqual({
pad: 0.1,
hard: 0,
soft: undefined,
mode: 3,
});
expect(rangeConfig.max).toEqual({
pad: 0.2,
hard: 100,
soft: undefined,
mode: 3,
});
expect(hardMinOnly).toBe(true);
expect(hardMaxOnly).toBe(true);
expect(hasFixedRange).toBe(true);
});
});
describe('createRangeFunction', () => {
it('returns [dataMin, dataMax] when no fixed range and no data', () => {
const params = {
rangeConfig: {} as uPlot.Range.Config,
hardMinOnly: false,
hardMaxOnly: false,
hasFixedRange: false,
min: null,
max: null,
};
const rangeFn = scaleUtils.createRangeFunction(params);
const u = ({
scales: {
y: {
distr: 1,
log: 10,
},
},
} as unknown) as uPlot;
const result = rangeFn(
u,
(null as unknown) as number,
(null as unknown) as number,
'y',
);
expect(result).toEqual([null, null]);
});
it('applies hard min/max for linear scale when only hard limits are set', () => {
const params = {
rangeConfig: {} as uPlot.Range.Config,
hardMinOnly: true,
hardMaxOnly: true,
hasFixedRange: true,
min: 0,
max: 100,
};
const rangeFn = scaleUtils.createRangeFunction(params);
// Use an undefined distr so the range function skips calling uPlot.rangeNum
// and we can focus on the behavior of applyHardLimits.
const u = ({
scales: {
y: {
distr: undefined,
log: 10,
},
},
} as unknown) as uPlot;
const result = rangeFn(u, 10, 20, 'y');
// After applyHardLimits, the returned range should respect configured min/max
expect(result).toEqual([0, 100]);
});
});
describe('adjustSoftLimitsWithThresholds', () => {
it('returns original soft limits when there are no thresholds', () => {
const result = scaleUtils.adjustSoftLimitsWithThresholds(1, 5, [], 'ms');
expect(result).toEqual({ softMin: 1, softMax: 5 });
});
it('expands soft limits to include threshold min/max values', () => {
const result = scaleUtils.adjustSoftLimitsWithThresholds(
3,
6,
[{ thresholdValue: 2 }, { thresholdValue: 8 }],
'ms',
);
// min should be pulled down to the smallest threshold value
expect(result.softMin).toBe(2);
// max should be pushed up to the largest threshold value
expect(result.softMax).toBe(8);
});
});
describe('getFallbackMinMaxTimeStamp', () => {
it('returns a 24-hour window ending at approximately now', () => {
const { fallbackMin, fallbackMax } = scaleUtils.getFallbackMinMaxTimeStamp();
// Difference should be exactly one day in seconds
expect(fallbackMax - fallbackMin).toBe(86400);
// Both should be reasonable timestamps (not NaN or negative)
expect(fallbackMin).toBeGreaterThan(0);
expect(fallbackMax).toBeGreaterThan(fallbackMin);
});
});
});

View File

@@ -1,36 +0,0 @@
import { findMinMaxThresholdValues } from '../threshold';
describe('findMinMaxThresholdValues', () => {
it('returns [null, null] when thresholds array is empty or missing', () => {
expect(findMinMaxThresholdValues([], 'ms')).toEqual([null, null]);
// @ts-expect-error intentional undefined to cover defensive branch
expect(findMinMaxThresholdValues(undefined, 'ms')).toEqual([null, null]);
});
it('returns min and max from thresholdValue when units are not provided', () => {
const thresholds = [
{ thresholdValue: 5 },
{ thresholdValue: 1 },
{ thresholdValue: 10 },
];
const [min, max] = findMinMaxThresholdValues(thresholds);
expect(min).toBe(1);
expect(max).toBe(10);
});
it('ignores thresholds without a value or with unconvertible units', () => {
const thresholds = [
// Should be ignored: convertValue returns null for unknown unit
{ thresholdValue: 100, thresholdUnit: 'unknown-unit' },
// Should be used
{ thresholdValue: 4 },
];
const [min, max] = findMinMaxThresholdValues(thresholds, 'ms');
expect(min).toBe(4);
expect(max).toBe(4);
});
});

View File

@@ -412,14 +412,16 @@ describe('Dashboard Provider - URL Variables Integration', () => {
});
// Verify dashboard state contains the variables with default values
await waitFor(() => {
const dashboardVariables = screen.getByTestId('dashboard-variables');
const parsedVariables = JSON.parse(dashboardVariables.textContent || '{}');
expect(parsedVariables).toHaveProperty('environment');
expect(parsedVariables).toHaveProperty('services');
// Default allSelected values should be preserved
expect(parsedVariables.environment.allSelected).toBe(false);
expect(parsedVariables.services.allSelected).toBe(false);
});
});
it('should merge URL variables with dashboard data and normalize values correctly', async () => {
@@ -466,16 +468,26 @@ describe('Dashboard Provider - URL Variables Integration', () => {
});
// Verify the dashboard state reflects the normalized URL values
await waitFor(() => {
const dashboardVariables = screen.getByTestId('dashboard-variables');
const parsedVariables = JSON.parse(dashboardVariables.textContent || '{}');
// First ensure the variables exist
expect(parsedVariables).toHaveProperty('environment');
expect(parsedVariables).toHaveProperty('services');
// Then check their properties
expect(parsedVariables.environment).toHaveProperty('selectedValue');
expect(parsedVariables.services).toHaveProperty('selectedValue');
// The selectedValue should be updated with normalized URL values
expect(parsedVariables.environment.selectedValue).toBe('development');
expect(parsedVariables.services.selectedValue).toEqual(['db', 'cache']);
// allSelected should be set to false when URL values override
expect(parsedVariables.environment.allSelected).toBe(false);
expect(parsedVariables.services.allSelected).toBe(false);
});
});
it('should handle ALL_SELECTED_VALUE from URL and set allSelected correctly', async () => {
@@ -500,8 +512,8 @@ describe('Dashboard Provider - URL Variables Integration', () => {
);
// Verify that allSelected is set to true for the services variable
await waitFor(() => {
const dashboardVariables = screen.getByTestId('dashboard-variables');
const parsedVariables = JSON.parse(dashboardVariables.textContent || '{}');
expect(parsedVariables.services.allSelected).toBe(true);
@@ -603,8 +615,8 @@ describe('Dashboard Provider - Textbox Variable Backward Compatibility', () => {
});
// Verify that defaultValue is set from textboxValue
await waitFor(() => {
const dashboardVariables = screen.getByTestId('dashboard-variables');
const parsedVariables = JSON.parse(dashboardVariables.textContent || '{}');
expect(parsedVariables.myTextbox.type).toBe('TEXTBOX');
@@ -648,8 +660,8 @@ describe('Dashboard Provider - Textbox Variable Backward Compatibility', () => {
});
// Verify that existing defaultValue is preserved
await waitFor(() => {
const dashboardVariables = screen.getByTestId('dashboard-variables');
const parsedVariables = JSON.parse(dashboardVariables.textContent || '{}');
expect(parsedVariables.myTextbox.type).toBe('TEXTBOX');
@@ -694,8 +706,8 @@ describe('Dashboard Provider - Textbox Variable Backward Compatibility', () => {
});
// Verify that defaultValue is set to empty string
await waitFor(() => {
const dashboardVariables = screen.getByTestId('dashboard-variables');
const parsedVariables = JSON.parse(dashboardVariables.textContent || '{}');
expect(parsedVariables.myTextbox.type).toBe('TEXTBOX');
@@ -739,8 +751,8 @@ describe('Dashboard Provider - Textbox Variable Backward Compatibility', () => {
});
// Verify that defaultValue is NOT set from textboxValue for QUERY type
await waitFor(() => {
const dashboardVariables = screen.getByTestId('dashboard-variables');
const parsedVariables = JSON.parse(dashboardVariables.textContent || '{}');
expect(parsedVariables.myQuery.type).toBe('QUERY');

View File

@@ -225,6 +225,12 @@ func (c *conditionBuilder) conditionFor(
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for map column type %s", valueType)
}
case schema.ColumnTypeEnumArray:
if operator == qbtypes.FilterOperatorExists {
return fmt.Sprintf("notEmpty(%s)", column.Name), nil
}
return fmt.Sprintf("empty(%s)", column.Name), nil
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "exists operator is not supported for column type %s", column.Type)
}
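For an Array(String) column such as events, the new branch maps exists to ClickHouse's notEmpty() and not exists to empty(). A minimal, self-contained sketch of that mapping (hypothetical helper, not the repo's API):

package main

import "fmt"

// arrayExistsCondition mirrors the ColumnTypeEnumArray branch above:
// `exists` asks for at least one element, `not exists` for none.
func arrayExistsCondition(columnName string, exists bool) string {
	if exists {
		return fmt.Sprintf("notEmpty(%s)", columnName)
	}
	return fmt.Sprintf("empty(%s)", columnName)
}

func main() {
	fmt.Println(arrayExistsCondition("events", true))  // notEmpty(events)
	fmt.Println(arrayExistsCondition("events", false)) // empty(events)
}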

View File

@@ -244,6 +244,19 @@ func TestConditionFor(t *testing.T) {
expectedArgs: []any{"%admin%", true},
expectedError: nil,
},
{
name: "Contains operator - array field",
key: telemetrytypes.TelemetryFieldKey{
Name: "events",
FieldContext: telemetrytypes.FieldContextSpan,
FieldDataType: telemetrytypes.FieldDataTypeArrayString,
},
operator: qbtypes.FilterOperatorContains,
value: "admin",
expectedSQL: "WHERE (LOWER(arrayStringConcat(events, ' ')) LIKE LOWER(?) AND notEmpty(events))",
expectedArgs: []any{"%admin%", true},
expectedError: nil,
},
{
name: "In operator - map field",
key: telemetrytypes.TelemetryFieldKey{

View File

@@ -77,6 +77,13 @@ var (
FieldContext: telemetrytypes.FieldContextSpan,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
"events": {
Name: "events",
Description: "Span events",
Signal: telemetrytypes.SignalTraces,
FieldContext: telemetrytypes.FieldContextSpan,
FieldDataType: telemetrytypes.FieldDataTypeArrayString,
},
}
IntrinsicFieldsDeprecated = map[string]telemetrytypes.TelemetryFieldKey{
"traceID": {

View File

@@ -301,9 +301,15 @@ func (m *defaultFieldMapper) FieldFor(
default:
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "value type %s is not supported for map column type %s", valueType, column.Type)
}
case schema.ColumnTypeEnumArray:
valueType := column.Type.(schema.ArrayColumnType).ElementType
if valueType.GetType() != schema.ColumnTypeEnumString {
return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "value type %s is not supported for array column type %s", valueType, column.Type)
}
return fmt.Sprintf("arrayStringConcat(%s, ' ')", column.Name), nil
}
// should not reach here
return column.Name, errors.NewInvalidInputf(errors.CodeInternal, "unable to identify field for %s", key)
}
// ColumnExpressionFor returns the column expression for the given field
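The mapper flattens a string array into one searchable string with arrayStringConcat(column, ' '), so LIKE and regexp run over the joined elements. A sketch of how that expression composes with the notEmpty guard from the condition builder, using a hypothetical helper (the resulting SQL shape matches the tests elsewhere in this diff):

package main

import "fmt"

// buildArrayContains composes the flattened field expression with the
// case-insensitive LIKE and the notEmpty guard seen in the generated SQL.
func buildArrayContains(column, needle string) string {
	field := fmt.Sprintf("arrayStringConcat(%s, ' ')", column)
	return fmt.Sprintf("(LOWER(%s) LIKE LOWER('%%%s%%') AND notEmpty(%s))", field, needle, column)
}

func main() {
	fmt.Println(buildArrayContains("events", "admin"))
	// (LOWER(arrayStringConcat(events, ' ')) LIKE LOWER('%admin%') AND notEmpty(events))
}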

View File

@@ -621,6 +621,45 @@ func TestStatementBuilderListQuery(t *testing.T) {
},
expectedErr: nil,
},
{
name: "List query with span events",
requestType: qbtypes.RequestTypeRaw,
query: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
Signal: telemetrytypes.SignalTraces,
StepInterval: qbtypes.Step{Duration: 30 * time.Second},
Filter: &qbtypes.Filter{
Expression: "events contains 'redis-manual'",
},
SelectFields: []telemetrytypes.TelemetryFieldKey{
{
Name: "name",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
{
Name: "serviceName",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
{
Name: "durationNano",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeNumber,
},
{
Name: "httpMethod",
FieldContext: telemetrytypes.FieldContextAttribute,
FieldDataType: telemetrytypes.FieldDataTypeString,
},
},
Limit: 10,
},
expected: qbtypes.Statement{
Query: "WITH __resource_filter AS (SELECT fingerprint FROM signoz_traces.distributed_traces_v3_resource WHERE true AND seen_at_ts_bucket_start >= ? AND seen_at_ts_bucket_start <= ?) SELECT name AS `name`, resource_string_service$$name AS `serviceName`, duration_nano AS `durationNano`, http_method AS `httpMethod`, timestamp AS `timestamp`, span_id AS `span_id`, trace_id AS `trace_id` FROM signoz_traces.distributed_signoz_index_v3 WHERE resource_fingerprint GLOBAL IN (SELECT fingerprint FROM __resource_filter) AND (LOWER(arrayStringConcat(events, ' ')) LIKE LOWER(?) AND notEmpty(events)) AND timestamp >= ? AND timestamp < ? AND ts_bucket_start >= ? AND ts_bucket_start <= ? LIMIT ?",
Args: []any{uint64(1747945619), uint64(1747983448), "%redis-manual%", "1747947419000000000", "1747983448000000000", uint64(1747945619), uint64(1747983448), 10},
},
expectedErr: nil,
},
{
name: "List query with legacy fields with field that doesn't exist",
requestType: qbtypes.RequestTypeRaw,

View File

@@ -111,6 +111,13 @@ func buildCompleteFieldKeyMap() map[string][]*telemetrytypes.TelemetryFieldKey {
FieldDataType: telemetrytypes.FieldDataTypeBool,
},
},
"events": {
{
Name: "events",
FieldContext: telemetrytypes.FieldContextSpan,
FieldDataType: telemetrytypes.FieldDataTypeArrayString,
},
},
}
for _, keys := range keysMap {
for _, key := range keys {

View File

@@ -452,7 +452,7 @@ class Traces(ABC):
self.events = []
for event in events:
self.events.append(
json.dumps([event.name, int(event.time_unix_nano), event.attribute_map])
)
# Create error events for exception events (following Go exporter logic)
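Each span event is stored as a JSON-encoded array of [name, time_unix_nano, attribute_map]; the one-line fix above casts the timestamp to an integer, presumably because the upstream value is a float and would otherwise serialize in scientific notation and break substring matching. A sketch of the resulting stored string, shown in Go for illustration (values borrowed from the fixtures in this PR):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// name, time_unix_nano (an integer after the fix), attribute map
	event := []any{"user_login", int64(1747947419000000000), map[string]string{"login.session.id": "123"}}
	b, err := json.Marshal(event)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // ["user_login",1747947419000000000,{"login.session.id":"123"}]
}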

View File

@@ -16,6 +16,7 @@ from fixtures.querier import (
from fixtures.traces import TraceIdGenerator, Traces, TracesKind, TracesStatusCode
from src.querier.util import (
assert_identical_query_response,
default_select_fields_results_lambda,
format_timestamp,
generate_traces_with_corrupt_metadata,
)
@@ -480,30 +481,16 @@ def test_traces_list(
@pytest.mark.parametrize(
"payload,status_code,results",
"spec,status_code,results",
[
# Case 1: order by timestamp field which is there in attributes as well
pytest.param(
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "traces",
"disabled": False,
"order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
"limit": 1,
},
"order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
"limit": 1,
},
HTTPStatus.OK,
default_select_fields_results_lambda(3),
),
# Case 2: order by attribute timestamp field which is there in attributes as well
# This should break but it doesn't because attribute.timestamp gets adjusted to timestamp
@@ -511,39 +498,19 @@ def test_traces_list(
# intrinsic field
pytest.param(
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "traces",
"disabled": False,
"order": [
{"key": {"name": "attribute.timestamp"}, "direction": "desc"}
],
"limit": 1,
},
"order": [
{"key": {"name": "attribute.timestamp"}, "direction": "desc"}
],
"limit": 1,
},
HTTPStatus.OK,
default_select_fields_results_lambda(3),
),
# Case 3: select timestamp with empty order by
pytest.param(
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "traces",
"disabled": False,
"selectFields": [{"name": "timestamp"}],
"limit": 1,
},
"selectFields": [{"name": "timestamp"}],
"limit": 1,
},
HTTPStatus.OK,
lambda x: [
@@ -556,15 +523,9 @@ def test_traces_list(
# This doesn't return any data because of where_clause using aliased timestamp
pytest.param(
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "traces",
"filter": {"expression": "attribute.timestamp exists"},
"disabled": False,
"selectFields": [{"name": "attribute.timestamp"}],
"limit": 1,
},
"filter": {"expression": "attribute.timestamp exists"},
"selectFields": [{"name": "attribute.timestamp"}],
"limit": 1,
},
HTTPStatus.OK,
lambda x: [], # type: Callable[[List[Traces]], List[Any]]
@@ -572,15 +533,9 @@ def test_traces_list(
# Case 5: select timestamp with timestamp order by
pytest.param(
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "traces",
"disabled": False,
"selectFields": [{"name": "timestamp"}],
"limit": 1,
"order": [{"key": {"name": "timestamp"}, "direction": "asc"}],
},
"selectFields": [{"name": "timestamp"}],
"limit": 1,
"order": [{"key": {"name": "timestamp"}, "direction": "asc"}],
},
HTTPStatus.OK,
lambda x: [
@@ -592,15 +547,9 @@ def test_traces_list(
# Case 6: select duration_nano with duration order by
pytest.param(
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "traces",
"disabled": False,
"selectFields": [{"name": "duration_nano"}],
"limit": 1,
"order": [{"key": {"name": "duration_nano"}, "direction": "desc"}],
},
"selectFields": [{"name": "duration_nano"}],
"limit": 1,
"order": [{"key": {"name": "duration_nano"}, "direction": "desc"}],
},
HTTPStatus.OK,
lambda x: [
@@ -613,21 +562,15 @@ def test_traces_list(
# Case 7: select attribute.duration_nano with attribute.duration_nano order by
pytest.param(
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "traces",
"disabled": False,
"selectFields": [{"name": "attribute.duration_nano"}],
"filter": {"expression": "attribute.duration_nano exists"},
"limit": 1,
"order": [
{
"key": {"name": "attribute.duration_nano"},
"direction": "desc",
}
],
},
"selectFields": [{"name": "attribute.duration_nano"}],
"filter": {"expression": "attribute.duration_nano exists"},
"limit": 1,
"order": [
{
"key": {"name": "attribute.duration_nano"},
"direction": "desc",
}
],
},
HTTPStatus.OK,
lambda x: [
@@ -640,15 +583,9 @@ def test_traces_list(
# Case 8: select attribute.duration_nano with duration order by
pytest.param(
{
"type": "builder_query",
"spec": {
"name": "A",
"signal": "traces",
"disabled": False,
"selectFields": [{"name": "attribute.duration_nano"}],
"limit": 1,
"order": [{"key": {"name": "duration_nano"}, "direction": "desc"}],
},
"selectFields": [{"name": "attribute.duration_nano"}],
"limit": 1,
"order": [{"key": {"name": "duration_nano"}, "direction": "desc"}],
},
HTTPStatus.OK,
lambda x: [
@@ -665,7 +602,7 @@ def test_traces_list_with_corrupt_data(
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_traces: Callable[[List[Traces]], None],
spec: Dict[str, Any],
status_code: HTTPStatus,
results: Callable[[List[Traces]], List[Any]],
) -> None:
@@ -690,7 +627,7 @@ def test_traces_list_with_corrupt_data(
),
end_ms=int(datetime.now(tz=timezone.utc).timestamp() * 1000),
request_type="raw",
queries=[{"type": "builder_query", "spec": {"signal": "traces", **spec}}],
)
assert response.status_code == status_code
@@ -2026,3 +1963,113 @@ def test_traces_fill_zero_formula_with_group_by(
expected_by_ts=expectations[service_name],
context=f"traces/fillZero/F1/{service_name}",
)
@pytest.mark.parametrize(
"spec,status_code,results",
[
# Case 1: select events
pytest.param(
{
"selectFields": [{"name": "events"}],
"filter": {"expression": "events exists"},
"order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
},
HTTPStatus.OK,
lambda x: [
x[2].events[0],
], # type: Callable[[List[Traces]], List[Any]]
),
# Case 2: Filter by events not exists
pytest.param(
{
"filter": {"expression": "events not exists"},
"order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
},
HTTPStatus.OK,
default_select_fields_results_lambda(3),
),
# Case 3: filter by events contains
pytest.param(
{
"filter": {"expression": "events contains 'login.session.id'"},
"order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
},
HTTPStatus.OK,
default_select_fields_results_lambda(2),
),
# Case 4: filter by equals; this won't return results
pytest.param(
{
"filter": {"expression": "events = 'login.session.id'"},
"order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
},
HTTPStatus.OK,
lambda x: None,
),
# Case 5: filter by contains
pytest.param(
{
"filter": {"expression": "events contains 'user_login'"},
"order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
},
HTTPStatus.OK,
default_select_fields_results_lambda(0),
),
# Case 6: filter by regex
pytest.param(
{
"filter": {"expression": "events regexp 'user_logout.*login.session.id.*123'"},
"order": [{"key": {"name": "timestamp"}, "direction": "desc"}],
},
HTTPStatus.OK,
default_select_fields_results_lambda(2),
),
],
)
def test_traces_query_span_events(
signoz: types.SigNoz,
create_user_admin: None, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
insert_traces: Callable[[List[Traces]], None],
spec: Dict[str, str],
status_code: HTTPStatus,
results: Callable[[List[Traces]], List[Any]],
) -> None:
"""
Setup:
Insert 4 traces with event data.
Tests:
Selecting and filtering spans by their events: exists, not exists, contains, equals, and regexp.
"""
traces = generate_traces_with_corrupt_metadata(with_trace_events=True)
insert_traces(traces)
# 4 Traces with corrupt metadata inserted
# traces[i] occurred before traces[j] where i < j
token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
response = make_query_request(
signoz,
token,
start_ms=int(
(datetime.now(tz=timezone.utc) - timedelta(minutes=5)).timestamp() * 1000
),
end_ms=int(datetime.now(tz=timezone.utc).timestamp() * 1000),
request_type="raw",
queries=[
{"type": "builder_query", "spec": {"signal": "traces", "limit": 1, **spec}}
],
)
assert response.status_code == status_code
if response.status_code == HTTPStatus.OK:
if not results(traces):
# No results expected
assert response.json()["data"]["data"]["results"][0]["rows"] is None
else:
data = response.json()["data"]["data"]["results"][0]["rows"][0]["data"]
# match exact values
for key, value in zip(list(data.keys()), results(traces)):
assert data[key] == value

View File

@@ -1,10 +1,16 @@
from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Any, Callable, List
import requests
from fixtures.traces import (
TraceIdGenerator,
Traces,
TracesEvent,
TracesKind,
TracesStatusCode,
)
def format_timestamp(dt: datetime) -> str:
@@ -38,7 +44,7 @@ def assert_identical_query_response(
), "Response data do not match"
def generate_traces_with_corrupt_metadata(with_trace_events=False) -> List[Traces]:
"""
Specifically, entries with 'id', 'timestamp', 'trace_id' and 'duration_nano' fields in metadata
"""
@@ -71,6 +77,17 @@ def generate_traces_with_corrupt_metadata() -> List[Traces]:
"cloud.account.id": "000",
"trace_id": "corrupt_data",
},
events=(
[]
if not with_trace_events
else [
TracesEvent(
name="user_login",
timestamp=now - timedelta(seconds=4),
attribute_map={"login.session.id": "123"},
)
]
),
attributes={
"net.transport": "IP.TCP",
"http.scheme": "http",
@@ -130,6 +147,17 @@ def generate_traces_with_corrupt_metadata() -> List[Traces]:
"http.status_code": "404",
"id": "1",
},
events=(
[]
if not with_trace_events
else [
TracesEvent(
name="user_logout",
timestamp=now - timedelta(seconds=4),
attribute_map={"login.session.id": "123"},
)
]
),
),
Traces(
timestamp=now - timedelta(seconds=1),
@@ -158,3 +186,15 @@ def generate_traces_with_corrupt_metadata() -> List[Traces]:
},
),
]
def default_select_fields_results_lambda(i: int) -> Callable[[List[Traces]], List[Any]]:
return lambda x: [
x[i].duration_nano,
x[i].name,
x[i].response_status_code,
x[i].service_name,
x[i].span_id,
format_timestamp(x[i].timestamp),
x[i].trace_id,
]

View File

@@ -1,23 +1,23 @@
from http import HTTPStatus
from typing import Callable
import pytest
import requests
from sqlalchemy import sql
from fixtures.auth import USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD
from fixtures.types import Operation, SigNoz
ANONYMOUS_USER_ID = "00000000-0000-0000-0000-000000000000"
def test_managed_roles_create_on_register(
signoz: SigNoz,
create_user_admin: Operation, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
):
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# get the list of all roles.
response = requests.get(
signoz.self.host_configs["8080"].get("/api/v1/roles"),
@@ -32,18 +32,22 @@ def test_managed_roles_create_on_register(
# since this check happens immediately post registration, all the managed roles should be present.
assert len(data) == 4
role_names = {role["name"] for role in data}
expected_names = {"signoz-admin", "signoz-viewer", "signoz-editor", "signoz-anonymous"}
expected_names = {
"signoz-admin",
"signoz-viewer",
"signoz-editor",
"signoz-anonymous",
}
# compare as sets since ordering is not guaranteed; a direct list match would be order-sensitive.
assert set(role_names) == expected_names
def test_root_user_signoz_admin_assignment(
request: pytest.FixtureRequest,
signoz: SigNoz,
create_user_admin: Operation, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
):
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# Get the user from the /user/me endpoint and extract the id
@@ -64,14 +68,16 @@ def test_root_user_signoz_admin_assignment(
# this validates to some extent that the role assignment is complete under the assumption that middleware is functioning as expected.
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
# Loop over the roles and get the org_id and id for signoz-admin role
roles = response.json()["data"]
admin_role_entry = next(
(role for role in roles if role["name"] == "signoz-admin"), None
)
assert admin_role_entry is not None
org_id = admin_role_entry["orgId"]
# to be super sure of authorization server, let's validate the tuples in DB as well.
# todo[@vikrantgupta25]: replace this with role members handler once built.
with signoz.sqlstore.conn.connect() as conn:
# verify the entry present for role assignment
@@ -80,15 +86,14 @@ def test_root_user_signoz_admin_assignment(
sql.text("SELECT * FROM tuple WHERE object_id = :object_id"),
{"object_id": tuple_object_id},
)
tuple_row = tuple_result.mappings().fetchone()
assert tuple_row is not None
# check that the tuple is for role assignment
assert tuple_row["object_type"] == "role"
assert tuple_row["relation"] == "assignee"
if request.config.getoption("--sqlstore-provider") == 'sqlite':
if request.config.getoption("--sqlstore-provider") == "sqlite":
user_object_id = f"organization/{org_id}/user/{user_id}"
assert tuple_row["user_object_type"] == "user"
assert tuple_row["user_object_id"] == user_object_id
@@ -97,13 +102,13 @@ def test_root_user_signoz_admin_assignment(
assert tuple_row["user_type"] == "user"
assert tuple_row["_user"] == _user
def test_anonymous_user_signoz_anonymous_assignment(
request: pytest.FixtureRequest,
signoz: SigNoz,
create_user_admin: Operation, # pylint: disable=unused-argument
get_token: Callable[[str, str], str],
):
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
response = requests.get(
@@ -115,14 +120,16 @@ def test_anonymous_user_signoz_anonymous_assignment(
# this validates to some extent that the role assignment is complete under the assumption that middleware is functioning as expected.
assert response.status_code == HTTPStatus.OK
assert response.json()["status"] == "success"
# Loop over the roles and get the org_id and id for signoz-anonymous role
roles = response.json()["data"]
admin_role_entry = next(
(role for role in roles if role["name"] == "signoz-anonymous"), None
)
assert admin_role_entry is not None
org_id = admin_role_entry["orgId"]
# to be super sure of authorization server, let's validate the tuples in DB as well.
# todo[@vikrantgupta25]: replace this with role members handler once built.
with signoz.sqlstore.conn.connect() as conn:
# verify the entry present for role assignment
@@ -131,15 +138,14 @@ def test_anonymous_user_signoz_anonymous_assignment(
sql.text("SELECT * FROM tuple WHERE object_id = :object_id"),
{"object_id": tuple_object_id},
)
tuple_row = tuple_result.mappings().fetchone()
assert tuple_row is not None
# check that the tuple is for role assignment
assert tuple_row["object_type"] == "role"
assert tuple_row["relation"] == "assignee"
if request.config.getoption("--sqlstore-provider") == 'sqlite':
if request.config.getoption("--sqlstore-provider") == "sqlite":
user_object_id = f"organization/{org_id}/anonymous/{ANONYMOUS_USER_ID}"
assert tuple_row["user_object_type"] == "anonymous"
assert tuple_row["user_object_id"] == user_object_id
@@ -147,5 +153,3 @@ def test_anonymous_user_signoz_anonymous_assignment(
_user = f"anonymous:organization/{org_id}/anonymous/{ANONYMOUS_USER_ID}"
assert tuple_row["user_type"] == "user"
assert tuple_row["_user"] == _user

View File

@@ -1,11 +1,16 @@
from http import HTTPStatus
from typing import Callable
import pytest
import requests
from sqlalchemy import sql
from fixtures.auth import (
USER_ADMIN_EMAIL,
USER_ADMIN_PASSWORD,
USER_EDITOR_EMAIL,
USER_EDITOR_PASSWORD,
)
from fixtures.types import Operation, SigNoz
@@ -16,7 +21,7 @@ def test_user_invite_accept_role_grant(
get_token: Callable[[str, str], str],
):
admin_token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)
# invite a user as editor
invite_payload = {
"email": USER_EDITOR_EMAIL,
@@ -30,7 +35,7 @@ def test_user_invite_accept_role_grant(
)
assert invite_response.status_code == HTTPStatus.CREATED
invite_token = invite_response.json()["data"]["token"]
# accept the invite for editor
accept_payload = {
"token": invite_token,
@@ -40,7 +45,7 @@ def test_user_invite_accept_role_grant(
signoz.self.host_configs["8080"].get("/api/v1/invite/accept"),
json=accept_payload,
timeout=2,
)
assert accept_response.status_code == HTTPStatus.CREATED
# Login with editor email and password
@@ -53,7 +58,6 @@ def test_user_invite_accept_role_grant(
assert user_me_response.status_code == HTTPStatus.OK
editor_id = user_me_response.json()["data"]["id"]
# check the forbidden response for admin api for editor user
admin_roles_response = requests.get(
signoz.self.host_configs["8080"].get("/api/v1/roles"),
@@ -79,11 +83,11 @@ def test_user_invite_accept_role_grant(
)
tuple_row = tuple_result.mappings().fetchone()
assert tuple_row is not None
assert tuple_row["object_type"] == "role"
assert tuple_row["relation"] == "assignee"
# verify the user tuple details depending on db provider
if request.config.getoption("--sqlstore-provider") == 'sqlite':
if request.config.getoption("--sqlstore-provider") == "sqlite":
user_object_id = f"organization/{org_id}/user/{editor_id}"
assert tuple_row["user_object_type"] == "user"
assert tuple_row["user_object_id"] == user_object_id
@@ -93,7 +97,6 @@ def test_user_invite_accept_role_grant(
assert tuple_row["_user"] == _user
def test_user_update_role_grant(
request: pytest.FixtureRequest,
signoz: SigNoz,
@@ -122,9 +125,7 @@ def test_user_update_role_grant(
org_id = roles_data[0]["orgId"]
# Update the user's role to viewer
update_payload = {"role": "VIEWER"}
update_response = requests.put(
signoz.self.host_configs["8080"].get(f"/api/v1/user/{editor_id}"),
json=update_payload,
@@ -139,7 +140,9 @@ def test_user_update_role_grant(
viewer_tuple_object_id = f"organization/{org_id}/role/signoz-viewer"
# Check there is no tuple for signoz-editor assignment
editor_tuple_result = conn.execute(
sql.text("SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"),
sql.text(
"SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"
),
{"object_id": editor_tuple_object_id},
)
for row in editor_tuple_result.mappings().fetchall():
@@ -152,13 +155,15 @@ def test_user_update_role_grant(
# Check that a tuple exists for signoz-viewer assignment
viewer_tuple_result = conn.execute(
sql.text("SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"),
sql.text(
"SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"
),
{"object_id": viewer_tuple_object_id},
)
row = viewer_tuple_result.mappings().fetchone()
assert row is not None
assert row["object_type"] == "role"
assert row["relation"] == "assignee"
if request.config.getoption("--sqlstore-provider") == "sqlite":
user_object_id = f"organization/{org_id}/user/{editor_id}"
assert row["user_object_type"] == "user"
@@ -168,6 +173,7 @@ def test_user_update_role_grant(
assert row["user_type"] == "user"
assert row["_user"] == _user
def test_user_delete_role_revoke(
request: pytest.FixtureRequest,
signoz: SigNoz,
@@ -205,10 +211,12 @@ def test_user_delete_role_revoke(
with signoz.sqlstore.conn.connect() as conn:
tuple_result = conn.execute(
sql.text("SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"),
sql.text(
"SELECT * FROM tuple WHERE object_id = :object_id AND relation = 'assignee'"
),
{"object_id": tuple_object_id},
)
# there should NOT be any tuple for the current user assignment
tuple_rows = tuple_result.mappings().fetchall()
for row in tuple_rows:
@@ -217,4 +225,4 @@ def test_user_delete_role_revoke(
assert row["user_object_id"] != user_object_id
else:
_user = f"user:organization/{org_id}/user/{editor_id}"
assert row["_user"] != _user
assert row["_user"] != _user