Mirror of https://github.com/SigNoz/signoz.git (synced 2026-02-08 02:39:55 +00:00)

Compare commits: 7 commits on branch update-co...ns/ext-api
| SHA1 |
|---|
| cd00d71478 |
| 15161c09e8 |
| ee5fbe41eb |
| f2f3a7b24a |
| dd0738ac70 |
| 1c4dfc931f |
| 605d6ba17d |
.github/CODEOWNERS (vendored): 2 changes
@@ -101,7 +101,7 @@

 # Integration tests
-/tests/integration/ @vikrantgupta25, @srikanthccv
+/tests/integration/ @vikrantgupta25

 # OpenAPI types generator
@@ -12,6 +12,7 @@ linters:
     - misspell
     - nilnil
    - sloglint
+    - wastedassign
     - unparam
     - unused
   settings:
@@ -38,7 +38,7 @@ module.exports = {
 		'import', // Import/export linting
 		'sonarjs', // Code quality/complexity
 		// TODO: Uncomment after running: yarn add -D eslint-plugin-spellcheck
-		// 'spellcheck',
+		// 'spellcheck', // Correct spellings
 	],
 	settings: {
 		react: {
@@ -60,12 +60,18 @@ module.exports = {
 		'no-debugger': 'error', // Disallows debugger statements in production code
 		curly: 'error', // Requires curly braces for all control statements
 		eqeqeq: ['error', 'always', { null: 'ignore' }], // Enforces === and !== (allows == null for null/undefined check)
-		// TODO: Enable after fixing ~15 console.log statements
-		// 'no-console': ['error', { allow: ['warn', 'error'] }], // Warns on console.log, allows console.warn/error
+		'no-console': ['error', { allow: ['warn', 'error'] }], // Warns on console.log, allows console.warn/error

 		// TypeScript rules
 		'@typescript-eslint/explicit-function-return-type': 'error', // Requires explicit return types on functions
-		'@typescript-eslint/no-unused-vars': 'off',
+		'@typescript-eslint/no-unused-vars': [
+			// Disallows unused variables/args
+			'error',
+			{
+				argsIgnorePattern: '^_', // Allows unused args prefixed with _ (e.g., _unusedParam)
+				varsIgnorePattern: '^_', // Allows unused vars prefixed with _ (e.g., _unusedVar)
+			},
+		],
 		'@typescript-eslint/no-explicit-any': 'warn', // Warns when using 'any' type (consider upgrading to error)
 		// TODO: Change to 'error' after fixing ~80 empty function placeholders in providers/contexts
 		'@typescript-eslint/no-empty-function': 'off', // Disallows empty function bodies
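Taken together, the re-enabled `no-console` and `@typescript-eslint/no-unused-vars` settings explain most of the mechanical renames in the diffs that follow. A minimal sketch of what the updated config now accepts (names hypothetical):

function handleResponse(_event: unknown, payload: string): string {
	// `_event` is unused but allowed by argsIgnorePattern: '^_'
	const { id: _id, ...rest } = JSON.parse(payload);
	// `_id` is unused but allowed by varsIgnorePattern: '^_'
	if (rest.status == null) {
		// `== null` is permitted by eqeqeq's { null: 'ignore' } option
		console.warn('missing status'); // warn/error allowed; console.log would now fail lint
	}
	return JSON.stringify(rest);
}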
@@ -41,7 +41,7 @@ export const getConsumerLagDetails = async (
 > => {
 	const { detailType, ...restProps } = props;
 	const response = await axios.post(
-		`/messaging-queues/kafka/consumer-lag/${props.detailType}`,
+		`/messaging-queues/kafka/consumer-lag/${detailType}`,
 		{
 			...restProps,
 		},
@@ -43,16 +43,17 @@ export const omitIdFromQuery = (query: Query | null): any => ({
 	builder: {
 		...query?.builder,
 		queryData: query?.builder.queryData.map((queryData) => {
-			const { id, ...rest } = queryData.aggregateAttribute || {};
+			const { id: _aggregateAttributeId, ...rest } =
+				queryData.aggregateAttribute || {};
 			const newAggregateAttribute = rest;
 			const newGroupByAttributes = queryData.groupBy.map((groupByAttribute) => {
-				const { id, ...rest } = groupByAttribute;
+				const { id: _groupByAttributeId, ...rest } = groupByAttribute;
 				return rest;
 			});
 			const newItems = queryData.filters?.items?.map((item) => {
-				const { id, ...newItem } = item;
+				const { id: _itemId, ...newItem } = item;
 				if (item.key) {
-					const { id, ...rest } = item.key;
+					const { id: _keyId, ...rest } = item.key;
 					return {
 						...newItem,
 						key: rest,
@@ -45,7 +45,6 @@ function Pre({
 }

 function Code({
-	node,
 	inline,
 	className = 'blog-code',
 	children,
@@ -29,7 +29,6 @@ import {
 	QUERY_BUILDER_OPERATORS_BY_KEY_TYPE,
 	queryOperatorSuggestions,
 } from 'constants/antlrQueryConstants';
-import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
 import { useIsDarkMode } from 'hooks/useDarkMode';
 import useDebounce from 'hooks/useDebounce';
 import { debounce, isNull } from 'lodash-es';
@@ -208,8 +207,6 @@ function QuerySearch({
 	const lastValueRef = useRef<string>('');
 	const isMountedRef = useRef<boolean>(true);

-	const { handleRunQuery } = useQueryBuilder();
-
 	const { selectedDashboard } = useDashboard();

 	const dynamicVariables = useMemo(
@@ -87,7 +87,7 @@ function TraceOperatorEditor({
 	// Track if the query was changed externally (from props) vs internally (user input)
 	const [isExternalQueryChange, setIsExternalQueryChange] = useState(false);
 	const [lastExternalValue, setLastExternalValue] = useState<string>('');
-	const { currentQuery, handleRunQuery } = useQueryBuilder();
+	const { currentQuery } = useQueryBuilder();

 	const queryOptions = useMemo(
 		() =>
@@ -5,7 +5,6 @@ import { EditorView } from '@uiw/react-codemirror';
 import { getKeySuggestions } from 'api/querySuggestions/getKeySuggestions';
 import { getValueSuggestions } from 'api/querySuggestions/getValueSuggestion';
 import { initialQueriesMap } from 'constants/queryBuilder';
-import * as UseQBModule from 'hooks/queryBuilder/useQueryBuilder';
 import { fireEvent, render, userEvent, waitFor } from 'tests/test-utils';
 import type { QueryKeyDataSuggestionsProps } from 'types/api/querySuggestions/types';
 import { DataSource } from 'types/common/queryBuilder';
@@ -121,13 +120,8 @@ jest.mock('api/querySuggestions/getValueSuggestion', () => ({
 // Note: We're NOT mocking CodeMirror here - using the real component
 // This provides integration testing with the actual CodeMirror editor

-const handleRunQueryMock = ((UseQBModule as unknown) as {
-	handleRunQuery: jest.MockedFunction<() => void>;
-}).handleRunQuery;
-
 const SAMPLE_KEY_TYPING = 'http.';
 const SAMPLE_VALUE_TYPING_INCOMPLETE = "service.name = '";
 const SAMPLE_VALUE_TYPING_COMPLETE = "service.name = 'frontend'";
 const SAMPLE_STATUS_QUERY = "http.status_code = '200'";

 describe('QuerySearch (Integration with Real CodeMirror)', () => {
@@ -796,12 +796,12 @@ export const adjustQueryForV5 = (currentQuery: Query): Query => {
 	});

 	const {
-		aggregateAttribute,
-		aggregateOperator,
-		timeAggregation,
-		spaceAggregation,
-		reduceTo,
-		filters,
+		aggregateAttribute: _aggregateAttribute,
+		aggregateOperator: _aggregateOperator,
+		timeAggregation: _timeAggregation,
+		spaceAggregation: _spaceAggregation,
+		reduceTo: _reduceTo,
+		filters: _filters,
 		...retainedQuery
 	} = query;

@@ -1,14 +1,5 @@
 import './Slider.styles.scss';

-import { IQuickFiltersConfig } from 'components/QuickFilters/types';
-
-interface ISliderProps {
-	filter: IQuickFiltersConfig;
-}
-
-// not needed for now build when required
-export default function Slider(props: ISliderProps): JSX.Element {
-	const { filter } = props;
-	console.log(filter);
+export default function Slider(): JSX.Element {
 	return <div>Slider</div>;
 }
@@ -303,15 +303,9 @@ export default function QuickFilters(props: IQuickFiltersProps): JSX.Element {
 				/>
 			);
 		case FiltersType.DURATION:
-			return (
-				<Duration
-					filter={filter}
-					onFilterChange={onFilterChange}
-					source={source}
-				/>
-			);
+			return <Duration filter={filter} onFilterChange={onFilterChange} />;
 		case FiltersType.SLIDER:
-			return <Slider filter={filter} />;
+			return <Slider />;
+		// eslint-disable-next-line sonarjs/no-duplicated-branches
 		default:
 			return (
@@ -1,7 +1,6 @@
 /* eslint-disable react/jsx-props-no-spreading */
 import { fireEvent, render, screen } from '@testing-library/react';
 import { QueryParams } from 'constants/query';
-import { initialQueriesMap } from 'constants/queryBuilder';
 import ROUTES from 'constants/routes';
 import { defaultPostableAlertRuleV2 } from 'container/CreateAlertV2/constants';
 import { getCreateAlertLocalStateFromAlertDef } from 'container/CreateAlertV2/utils';
@@ -73,7 +73,7 @@ export function sanitizeDashboardData(

 	const updatedVariables = Object.entries(selectedData.variables).reduce(
 		(acc, [key, value]) => {
-			const { selectedValue, ...rest } = value;
+			const { selectedValue: _selectedValue, ...rest } = value;
 			acc[key] = rest;
 			return acc;
 		},
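The same underscore rename appears here inside a reduce over Object.entries; the pattern in isolation, as a runnable sketch with hypothetical data:

const variables: Record<string, { selectedValue?: string; name: string }> = {
	env: { selectedValue: 'prod', name: 'env' },
};
const updatedVariables = Object.entries(variables).reduce((acc, [key, value]) => {
	const { selectedValue: _selectedValue, ...rest } = value; // drop selectedValue, keep the rest
	acc[key] = rest;
	return acc;
}, {} as Record<string, { name: string }>);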
@@ -9,10 +9,11 @@ import { ALERTS_DATA_SOURCE_MAP } from 'constants/alerts';
 import { ENTITY_VERSION_V4 } from 'constants/app';
 import { PANEL_TYPES } from 'constants/queryBuilder';
 import { QBShortcuts } from 'constants/shortcuts/QBShortcuts';
+import RunQueryBtn from 'container/QueryBuilder/components/RunQueryBtn/RunQueryBtn';
 import { useKeyboardHotkeys } from 'hooks/hotkeys/useKeyboardHotkeys';
 import { useIsDarkMode } from 'hooks/useDarkMode';
 import { isEmpty } from 'lodash-es';
-import { Atom, Play, Terminal } from 'lucide-react';
+import { Atom, Terminal } from 'lucide-react';
 import { useEffect, useMemo, useState } from 'react';
 import { useTranslation } from 'react-i18next';
 import { AlertTypes } from 'types/api/alerts/alertTypes';
@@ -165,9 +166,8 @@ function QuerySection({
 					onChange={handleQueryCategoryChange}
 					tabBarExtraContent={
 						<span style={{ display: 'flex', gap: '1rem', alignItems: 'center' }}>
-							<Button
-								type="primary"
-								onClick={(): void => {
+							<RunQueryBtn
+								onStageRunQuery={(): void => {
 									runQuery();
 									logEvent('Alert: Stage and run query', {
 										dataSource: ALERTS_DATA_SOURCE_MAP[alertType],
@@ -176,11 +176,7 @@ function QuerySection({
 										queryType: queryCategory,
 									});
 								}}
-								className="stage-run-query"
-								icon={<Play size={14} />}
-							>
-								Stage & Run Query
-							</Button>
+							/>
 						</span>
 					}
 					items={tabs}
@@ -199,14 +195,7 @@ function QuerySection({
 					onChange={handleQueryCategoryChange}
 					tabBarExtraContent={
 						<span style={{ display: 'flex', gap: '1rem', alignItems: 'center' }}>
-							<Button
-								type="primary"
-								onClick={runQuery}
-								className="stage-run-query"
-								icon={<Play size={14} />}
-							>
-								Stage & Run Query
-							</Button>
+							<RunQueryBtn onStageRunQuery={runQuery} />
 						</span>
 					}
 					items={items}
@@ -1,14 +1,6 @@
 /* eslint-disable react/display-name */
 import { PlusOutlined } from '@ant-design/icons';
-import {
-	Button,
-	Dropdown,
-	Flex,
-	Input,
-	MenuProps,
-	Tag,
-	Typography,
-} from 'antd';
+import { Button, Flex, Input, Typography } from 'antd';
 import type { ColumnsType } from 'antd/es/table/interface';
 import saveAlertApi from 'api/alerts/save';
 import logEvent from 'api/common/logEvent';
@@ -91,7 +91,7 @@ function Summary(): JSX.Element {
 	const queryFiltersWithoutId = useMemo(() => {
 		const filtersWithoutId = {
 			...queryFilters,
-			items: queryFilters.items.map(({ id, ...rest }) => rest),
+			items: queryFilters.items.map(({ id: _id, ...rest }) => rest),
 		};
 		return JSON.stringify(filtersWithoutId);
 	}, [queryFilters]);
@@ -12,6 +12,7 @@ import {
 	getDefaultWidgetData,
 	PANEL_TYPE_TO_QUERY_TYPES,
 } from 'container/NewWidget/utils';
+import RunQueryBtn from 'container/QueryBuilder/components/RunQueryBtn/RunQueryBtn';
 // import { QueryBuilder } from 'container/QueryBuilder';
 import { QueryBuilderProps } from 'container/QueryBuilder/QueryBuilder.interfaces';
 import { useKeyboardHotkeys } from 'hooks/hotkeys/useKeyboardHotkeys';
@@ -20,7 +21,7 @@ import { useShareBuilderUrl } from 'hooks/queryBuilder/useShareBuilderUrl';
 import { useIsDarkMode } from 'hooks/useDarkMode';
 import useUrlQuery from 'hooks/useUrlQuery';
 import { defaultTo, isUndefined } from 'lodash-es';
-import { Atom, Play, Terminal } from 'lucide-react';
+import { Atom, Terminal } from 'lucide-react';
 import { useDashboard } from 'providers/Dashboard/Dashboard';
 import {
 	getNextWidgets,
@@ -28,20 +29,14 @@ import {
 	getSelectedWidgetIndex,
 } from 'providers/Dashboard/util';
 import { useCallback, useEffect, useMemo } from 'react';
-import { UseQueryResult } from 'react-query';
-import { SuccessResponse } from 'types/api';
 import { Widgets } from 'types/api/dashboard/getAll';
-import { MetricRangePayloadProps } from 'types/api/metrics/getQueryRange';
 import { Query } from 'types/api/queryBuilder/queryBuilderData';
 import { EQueryType } from 'types/common/dashboard';

 import ClickHouseQueryContainer from './QueryBuilder/clickHouse';
 import PromQLQueryContainer from './QueryBuilder/promQL';

-function QuerySection({
-	selectedGraph,
-	queryResponse,
-}: QueryProps): JSX.Element {
+function QuerySection({ selectedGraph }: QueryProps): JSX.Element {
 	const {
 		currentQuery,
 		handleRunQuery: handleRunQueryFromQueryBuilder,
@@ -242,15 +237,7 @@ function QuerySection({
 				tabBarExtraContent={
 					<span style={{ display: 'flex', gap: '1rem', alignItems: 'center' }}>
 						<TextToolTip text="This will temporarily save the current query and graph state. This will persist across tab change" />
-						<Button
-							loading={queryResponse.isFetching}
-							type="primary"
-							onClick={handleRunQuery}
-							className="stage-run-query"
-							icon={<Play size={14} />}
-						>
-							Stage & Run Query
-						</Button>
+						<RunQueryBtn label="Stage & Run Query" onStageRunQuery={handleRunQuery} />
 					</span>
 				}
 				items={items}
@@ -261,10 +248,6 @@ function QuerySection({

 interface QueryProps {
 	selectedGraph: PANEL_TYPES;
-	queryResponse: UseQueryResult<
-		SuccessResponse<MetricRangePayloadProps, unknown>,
-		Error
-	>;
 }

 export default QuerySection;
@@ -64,7 +64,7 @@ function LeftContainer({
 				enableDrillDown={enableDrillDown}
 			/>
 			<QueryContainer className="query-section-left-container">
-				<QuerySection selectedGraph={selectedGraph} queryResponse={queryResponse} />
+				<QuerySection selectedGraph={selectedGraph} />
 				{selectedGraph === PANEL_TYPES.LIST && (
 					<ExplorerColumnsRenderer
 						selectedLogFields={selectedLogFields}
@@ -166,7 +166,7 @@ function UpdateContextLinks({
 			onSave(newContextLink);
 		} catch (error) {
 			// Form validation failed, don't call onSave
-			console.log('Form validation failed:', error);
+			console.error('Form validation failed:', error);
 		}
 	};

@@ -0,0 +1,37 @@
.run-query-btn {
	display: flex;
	min-width: 132px;
	align-items: center;
	gap: 6px;

	.ant-btn-icon {
		margin: 0 !important;
	}
}

.cancel-query-btn {
	display: flex;
	min-width: 132px;
	align-items: center;
	gap: 6px;
}

.cmd-hint {
	display: inline-flex;
	align-items: center;
	gap: 2px;
	padding: 2px 4px;
	border-radius: 4px;
	//not using var here to support opacity 60%. To be handled at design system level.
	background: rgba(35, 38, 46, 0.6);
	line-height: 1;
	font-size: 10px;
}

.lightMode {
	.cmd-hint {
		color: var(--bg-ink-200);
		//not using var here to support opacity 60%. To be handled at design system level.
		background: rgba(231, 232, 236, 0.8);
	}
}
@@ -0,0 +1,53 @@
import './RunQueryBtn.scss';

import { Button } from 'antd';
import {
	ChevronUp,
	Command,
	CornerDownLeft,
	Loader2,
	Play,
} from 'lucide-react';
import { getUserOperatingSystem, UserOperatingSystem } from 'utils/getUserOS';

interface RunQueryBtnProps {
	label?: string;
	isLoadingQueries?: boolean;
	handleCancelQuery?: () => void;
	onStageRunQuery?: () => void;
}

function RunQueryBtn({
	label,
	isLoadingQueries,
	handleCancelQuery,
	onStageRunQuery,
}: RunQueryBtnProps): JSX.Element {
	const isMac = getUserOperatingSystem() === UserOperatingSystem.MACOS;
	return isLoadingQueries ? (
		<Button
			type="default"
			icon={<Loader2 size={14} className="loading-icon animate-spin" />}
			className="cancel-query-btn periscope-btn danger"
			onClick={handleCancelQuery}
		>
			Cancel
		</Button>
	) : (
		<Button
			type="primary"
			className="run-query-btn periscope-btn primary"
			disabled={isLoadingQueries || !onStageRunQuery}
			onClick={onStageRunQuery}
			icon={<Play size={14} />}
		>
			{label || 'Run Query'}
			<div className="cmd-hint">
				{isMac ? <Command size={12} /> : <ChevronUp size={12} />}
				<CornerDownLeft size={12} />
			</div>
		</Button>
	);
}

export default RunQueryBtn;
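A consolidated usage sketch (a hypothetical wrapper component; the handler names mirror the call sites changed later in this compare):

function ToolbarExample(): JSX.Element {
	const handleRunQuery = (): void => {};
	const handleCancelQuery = (): void => {};
	const isLoadingQueries = false;
	return (
		<>
			{/* Custom label, as in the alert and dashboard query sections */}
			<RunQueryBtn label="Stage & Run Query" onStageRunQuery={handleRunQuery} />
			{/* Loading state renders a Cancel button wired to handleCancelQuery */}
			<RunQueryBtn
				isLoadingQueries={isLoadingQueries}
				handleCancelQuery={handleCancelQuery}
				onStageRunQuery={handleRunQuery}
			/>
			{/* No handler: renders disabled, as in the live-logs toolbar */}
			<RunQueryBtn />
		</>
	);
}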
@@ -0,0 +1,82 @@
// frontend/src/container/QueryBuilder/components/RunQueryBtn/__tests__/RunQueryBtn.test.tsx
import { fireEvent, render, screen } from '@testing-library/react';

import RunQueryBtn from '../RunQueryBtn';

// Mock OS util
jest.mock('utils/getUserOS', () => ({
	getUserOperatingSystem: jest.fn(),
	UserOperatingSystem: { MACOS: 'mac', WINDOWS: 'win', LINUX: 'linux' },
}));
import { getUserOperatingSystem, UserOperatingSystem } from 'utils/getUserOS';

describe('RunQueryBtn', () => {
	test('renders run state and triggers on click', () => {
		(getUserOperatingSystem as jest.Mock).mockReturnValue(
			UserOperatingSystem.MACOS,
		);
		const onRun = jest.fn();
		render(<RunQueryBtn onStageRunQuery={onRun} />);
		const btn = screen.getByRole('button', { name: /run query/i });
		expect(btn).toBeEnabled();
		fireEvent.click(btn);
		expect(onRun).toHaveBeenCalledTimes(1);
	});

	test('disabled when onStageRunQuery is undefined', () => {
		(getUserOperatingSystem as jest.Mock).mockReturnValue(
			UserOperatingSystem.MACOS,
		);
		render(<RunQueryBtn />);
		expect(screen.getByRole('button', { name: /run query/i })).toBeDisabled();
	});

	test('shows cancel state and calls handleCancelQuery', () => {
		(getUserOperatingSystem as jest.Mock).mockReturnValue(
			UserOperatingSystem.MACOS,
		);
		const onCancel = jest.fn();
		render(<RunQueryBtn isLoadingQueries handleCancelQuery={onCancel} />);
		const cancel = screen.getByRole('button', { name: /cancel/i });
		fireEvent.click(cancel);
		expect(onCancel).toHaveBeenCalledTimes(1);
	});

	test('shows Command + CornerDownLeft on mac', () => {
		(getUserOperatingSystem as jest.Mock).mockReturnValue(
			UserOperatingSystem.MACOS,
		);
		const { container } = render(
			<RunQueryBtn onStageRunQuery={(): void => {}} />,
		);
		expect(container.querySelector('.lucide-command')).toBeInTheDocument();
		expect(
			container.querySelector('.lucide-corner-down-left'),
		).toBeInTheDocument();
	});

	test('shows ChevronUp + CornerDownLeft on non-mac', () => {
		(getUserOperatingSystem as jest.Mock).mockReturnValue(
			UserOperatingSystem.WINDOWS,
		);
		const { container } = render(
			<RunQueryBtn onStageRunQuery={(): void => {}} />,
		);
		expect(container.querySelector('.lucide-chevron-up')).toBeInTheDocument();
		expect(container.querySelector('.lucide-command')).not.toBeInTheDocument();
		expect(
			container.querySelector('.lucide-corner-down-left'),
		).toBeInTheDocument();
	});

	test('renders custom label when provided', () => {
		(getUserOperatingSystem as jest.Mock).mockReturnValue(
			UserOperatingSystem.MACOS,
		);
		const onRun = jest.fn();
		render(<RunQueryBtn onStageRunQuery={onRun} label="Stage & Run Query" />);
		expect(
			screen.getByRole('button', { name: /stage & run query/i }),
		).toBeInTheDocument();
	});
});
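One detail worth noting about this test file: `jest.mock` calls are hoisted above imports by Jest's transform, which is why the mocked module can be imported after the `jest.mock(...)` declaration and still resolve to the mock. Condensed:

jest.mock('utils/getUserOS', () => ({
	getUserOperatingSystem: jest.fn(),
	UserOperatingSystem: { MACOS: 'mac', WINDOWS: 'win', LINUX: 'linux' },
}));
// Hoisting makes this import resolve to the mock factory above despite the source order:
import { getUserOperatingSystem } from 'utils/getUserOS';

(getUserOperatingSystem as jest.Mock).mockReturnValue('mac');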
@@ -1,12 +1,12 @@
 import './ToolbarActions.styles.scss';

-import { Button } from 'antd';
 import { LogsExplorerShortcuts } from 'constants/shortcuts/logsExplorerShortcuts';
 import { useKeyboardHotkeys } from 'hooks/hotkeys/useKeyboardHotkeys';
-import { Loader2, Play } from 'lucide-react';
 import { MutableRefObject, useEffect } from 'react';
 import { useQueryClient } from 'react-query';

+import RunQueryBtn from '../RunQueryBtn/RunQueryBtn';
+
 interface RightToolbarActionsProps {
 	onStageRunQuery: () => void;
 	isLoadingQueries?: boolean;
@@ -42,14 +42,7 @@ export default function RightToolbarActions({
 	if (showLiveLogs) {
 		return (
 			<div className="right-toolbar-actions-container">
-				<Button
-					type="primary"
-					className="run-query-btn periscope-btn primary"
-					disabled
-					icon={<Play size={14} />}
-				>
-					Run Query
-				</Button>
+				<RunQueryBtn />
 			</div>
 		);
 	}
@@ -65,26 +58,11 @@ export default function RightToolbarActions({

 	return (
 		<div className="right-toolbar-actions-container">
-			{isLoadingQueries ? (
-				<Button
-					type="default"
-					icon={<Loader2 size={14} className="loading-icon animate-spin" />}
-					className="cancel-query-btn periscope-btn danger"
-					onClick={handleCancelQuery}
-				>
-					Cancel
-				</Button>
-			) : (
-				<Button
-					type="primary"
-					className="run-query-btn periscope-btn primary"
-					disabled={isLoadingQueries}
-					onClick={onStageRunQuery}
-					icon={<Play size={14} />}
-				>
-					Run Query
-				</Button>
-			)}
+			<RunQueryBtn
+				isLoadingQueries={isLoadingQueries}
+				handleCancelQuery={handleCancelQuery}
+				onStageRunQuery={onStageRunQuery}
+			/>
 		</div>
 	);
 }
@@ -136,7 +136,6 @@ const useAggregateDrilldown = ({
 		query,
 		// panelType,
 		aggregateData: aggregateDataWithTimeRange,
-		widgetId,
 		onClose,
 	});

@@ -129,7 +129,6 @@ const useBaseAggregateOptions = ({

 	const handleBaseDrilldown = useCallback(
 		(key: string): void => {
-			console.log('Base drilldown:', { key, aggregateData });
 			const route = getRoute(key);
 			const timeRange = aggregateData?.timeRange;
 			const filtersToAdd = aggregateData?.filters || [];
@@ -17,7 +17,6 @@ interface UseBaseAggregateOptionsProps {
 	query?: Query;
 	// panelType?: PANEL_TYPES;
 	aggregateData?: AggregateData | null;
-	widgetId?: string;
 	onClose: () => void;
 }

@@ -27,7 +26,6 @@ const useDashboardVarConfig = ({
 	query,
 	// panelType,
 	aggregateData,
-	widgetId,
 	onClose,
 }: UseBaseAggregateOptionsProps): {
 	dashbaordVariablesConfig: {
@@ -83,11 +81,6 @@ const useDashboardVarConfig = ({
 			dashboardVar: [string, IDashboardVariable],
 			fieldValue: any,
 		) => {
-			console.log('Setting variable:', {
-				fieldName,
-				dashboardVarId: dashboardVar[0],
-				fieldValue,
-			});
 			onValueUpdate(fieldName, dashboardVar[1]?.id, fieldValue, false);
 			onClose();
 		},
@@ -96,10 +89,6 @@ const useDashboardVarConfig = ({

 	const handleUnsetVariable = useCallback(
 		(fieldName: string, dashboardVar: [string, IDashboardVariable]) => {
-			console.log('Unsetting variable:', {
-				fieldName,
-				dashboardVarId: dashboardVar[0],
-			});
 			onValueUpdate(fieldName, dashboardVar[0], null, false);
 			onClose();
 		},
@@ -109,12 +98,6 @@ const useDashboardVarConfig = ({
 	const handleCreateVariable = useCallback(
 		(fieldName: string, fieldValue: string | number | boolean) => {
 			const source = getSourceFromQuery();
-			console.log('Creating variable from drilldown:', {
-				fieldName,
-				fieldValue,
-				source,
-				widgetId,
-			});
 			createVariable(
 				fieldName,
 				fieldValue,
@@ -125,7 +108,7 @@ const useDashboardVarConfig = ({
 			);
 			onClose();
 		},
-		[createVariable, getSourceFromQuery, widgetId, onClose],
+		[createVariable, getSourceFromQuery, onClose],
 	);

 	const contextItems = useMemo(
@@ -19,20 +19,6 @@
 		display: flex;
 		align-items: center;
 		gap: 8px;
-
-		.cancel-query-btn {
-			min-width: 96px;
-			display: flex;
-			align-items: center;
-			gap: 2px;
-		}
-
-		.run-query-btn {
-			min-width: 96px;
-			display: flex;
-			align-items: center;
-			gap: 2px;
-		}
 	}
 }
@@ -31,8 +31,8 @@ export const normalizeSteps = (steps: FunnelStepData[]): FunnelStepData[] => {
 			...step.filters,
 			items: step.filters.items.map((item) => {
 				const {
-					id: unusedId,
-					isIndexed,
+					id: _unusedId,
+					isIndexed: _isIndexed,
 					...keyObj
 				} = item.key as BaseAutocompleteData;
 				return {
@@ -143,7 +143,7 @@ export const useValidateFunnelSteps = ({
 		selectedTime,
 		steps.map((step) => {
 			// eslint-disable-next-line @typescript-eslint/naming-convention
-			const { latency_type, ...rest } = step;
+			const { latency_type: _latency_type, ...rest } = step;
 			return rest;
 		}),
 	],
@@ -55,7 +55,7 @@ const useDragColumns = <T>(storageKey: LOCALSTORAGE): UseDragColumns<T> => {
 				const parsedDraggedColumns = await JSON.parse(localStorageColumns);
 				nextDraggedColumns = parsedDraggedColumns;
 			} catch (e) {
-				console.log('error while parsing json');
+				console.error('error while parsing json: ', e);
 			} finally {
 				redirectWithDraggedColumns(nextDraggedColumns);
 			}
@@ -4,11 +4,6 @@ import parser from 'lib/logql/parser';
 describe('lib/logql/parser', () => {
 	test('parse valid queries', () => {
 		logqlQueries.forEach((queryObject) => {
-			try {
-				parser(queryObject.query);
-			} catch (e) {
-				console.log(e);
-			}
 			expect(parser(queryObject.query)).toEqual(queryObject.parsedQuery);
 		});
 	});
@@ -4,11 +4,7 @@ import { reverseParser } from 'lib/logql/reverseParser';
 describe('lib/logql/reverseParser', () => {
 	test('reverse parse valid queries', () => {
 		logqlQueries.forEach((queryObject) => {
-			try {
-				expect(reverseParser(queryObject.parsedQuery)).toEqual(queryObject.query);
-			} catch (e) {
-				console.log(e);
-			}
+			expect(reverseParser(queryObject.parsedQuery)).toEqual(queryObject.query);
 		});
 	});
 });
@@ -11,8 +11,8 @@ describe('getYAxisScale', () => {
 			keyIndex: 1,
 			thresholdValue: 10,
 			thresholdUnit: 'percentunit',
-			moveThreshold(dragIndex, hoverIndex): void {
-				console.log(dragIndex, hoverIndex);
+			moveThreshold(): void {
+				// no-op
 			},
 			selectedGraph: PANEL_TYPES.TIME_SERIES,
 		},
@@ -21,8 +21,8 @@ describe('getYAxisScale', () => {
 			keyIndex: 2,
 			thresholdValue: 20,
 			thresholdUnit: 'percentunit',
-			moveThreshold(dragIndex, hoverIndex): void {
-				console.log(dragIndex, hoverIndex);
+			moveThreshold(): void {
+				// no-op
 			},
 			selectedGraph: PANEL_TYPES.TIME_SERIES,
 		},
@@ -9,7 +9,6 @@ import {
 	MessagingQueuesPayloadProps,
 } from 'api/messagingQueues/getConsumerLagDetails';
 import axios from 'axios';
-import { isNumber } from 'chart.js/helpers';
 import cx from 'classnames';
 import { ColumnTypeRender } from 'components/Logs/TableView/types';
 import { SOMETHING_WENT_WRONG } from 'constants/api';
@@ -275,7 +275,7 @@ export function setConfigDetail(
 	},
 ): void {
 	// remove "key" and its value from the paramsToSet object
-	const { key, ...restParamsToSet } = paramsToSet || {};
+	const { key: _key, ...restParamsToSet } = paramsToSet || {};

 	if (!isEmpty(restParamsToSet)) {
 		const configDetail = {
@@ -145,7 +145,7 @@ export const removeFilter = (
 	const updatedValues = prevValue.filter((item: any) => item !== value);

 	if (updatedValues.length === 0) {
-		const { [filterType]: item, ...remainingFilters } = prevFilters;
+		const { [filterType]: _item, ...remainingFilters } = prevFilters;
 		return Object.keys(remainingFilters).length > 0
 			? (remainingFilters as FilterType)
 			: undefined;
@@ -175,7 +175,7 @@ export const removeAllFilters = (
 		return prevFilters;
 	}

-	const { [filterType]: item, ...remainingFilters } = prevFilters;
+	const { [filterType]: _item, ...remainingFilters } = prevFilters;

 	return Object.keys(remainingFilters).length > 0
 		? (remainingFilters as Record<
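The computed-property rest destructure used here removes a dynamically named key without mutating the source object; the pattern in isolation (hypothetical data):

const filters = { status: ['200'], method: ['GET'] };
const filterType = 'status';
// `_item` receives filters[filterType]; `remainingFilters` is filters without that key
const { [filterType]: _item, ...remainingFilters } = filters;
// remainingFilters => { method: ['GET'] }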
@@ -20,8 +20,7 @@ export const parseQueryIntoSpanKind = (
 			current = parsedValue;
 		}
 	} catch (error) {
-		console.log(error);
-		console.log('error while parsing json');
+		console.error('error while parsing json: ', error);
 	}
 }

@@ -19,7 +19,7 @@ export const parseQueryIntoCurrent = (
 			current = parseInt(parsedValue, 10);
 		}
 	} catch (error) {
-		console.log('error while parsing json');
+		console.error('error while parsing json: ', error);
 	}
 }

@@ -20,8 +20,7 @@ export const parseQueryIntoOrder = (
 			current = parsedValue;
 		}
 	} catch (error) {
-		console.log(error);
-		console.log('error while parsing json');
+		console.error('error while parsing json: ', error);
 	}
 }

@@ -20,8 +20,7 @@ export const parseAggregateOrderParams = (
 			current = parsedValue;
 		}
 	} catch (error) {
-		console.log(error);
-		console.log('error while parsing json');
+		console.error('error while parsing json: ', error);
 	}
 }

@@ -81,8 +81,10 @@ func FilterIntermediateColumns(result *qbtypes.QueryRangeResponse) *qbtypes.Quer
 	// Filter out columns for intermediate queries used only in formulas
 	filteredColumns := make([]*qbtypes.ColumnDescriptor, 0)
 	intermediateQueryNames := map[string]bool{
-		"error":      true,
-		"total_span": true,
+		"error":             true,
+		"total_span":        true,
+		"endpoints_current": true,
+		"endpoints_legacy":  true,
 	}

 	columnIndices := make([]int, 0)
@@ -296,15 +298,15 @@ func BuildDomainList(req *thirdpartyapitypes.ThirdPartyApiRequest) (*qbtypes.Que
 		return nil, err
 	}

-	queries := []qbtypes.QueryEnvelope{
-		buildEndpointsQuery(req),
+	queries := buildEndpointsQueries(req)
+	queries = append(queries,
 		buildLastSeenQuery(req),
 		buildRpsQuery(req),
 		buildErrorQuery(req),
 		buildTotalSpanQuery(req),
 		buildP99Query(req),
 		buildErrorRateFormula(),
-	}
+	)

 	return &qbtypes.QueryRangeRequest{
 		SchemaVersion: "v5",
@@ -346,20 +348,58 @@ func BuildDomainInfo(req *thirdpartyapitypes.ThirdPartyApiRequest) (*qbtypes.Que
 	}, nil
 }

-func buildEndpointsQuery(req *thirdpartyapitypes.ThirdPartyApiRequest) qbtypes.QueryEnvelope {
-	return qbtypes.QueryEnvelope{
+// buildEndpointsQueries returns queries for counting distinct URLs with semconv fallback.
+// It uses two queries with mutually exclusive filters:
+//   - endpoints_current: count_distinct(url.full) WHERE url.full EXISTS
+//   - endpoints_legacy: count_distinct(http.url) WHERE url.full NOT EXISTS
+// And a formula to combine them: endpoints_current + endpoints_legacy
+func buildEndpointsQueries(req *thirdpartyapitypes.ThirdPartyApiRequest) []qbtypes.QueryEnvelope {
+	// Query for current semconv (url.full)
+	currentFilter := buildBaseFilter(req.Filter)
+	currentFilter.Expression = fmt.Sprintf("(%s) AND %s EXISTS", currentFilter.Expression, urlPathKey)
+
+	endpointsCurrent := qbtypes.QueryEnvelope{
 		Type: qbtypes.QueryTypeBuilder,
 		Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
-			Name:         "endpoints",
+			Name:         "endpoints_current",
 			Signal:       telemetrytypes.SignalTraces,
 			StepInterval: qbtypes.Step{Duration: defaultStepInterval},
 			Aggregations: []qbtypes.TraceAggregation{
-				{Expression: "count_distinct(http.url)"},
+				{Expression: fmt.Sprintf("count_distinct(%s)", urlPathKey)},
 			},
-			Filter:  buildBaseFilter(req.Filter),
+			Filter:  currentFilter,
 			GroupBy: mergeGroupBy(dualSemconvGroupByKeys["server"], req.GroupBy),
 		},
 	}
+
+	// Query for legacy semconv (http.url) - only when url.full doesn't exist
+	legacyFilter := buildBaseFilter(req.Filter)
+	legacyFilter.Expression = fmt.Sprintf("(%s) AND %s NOT EXISTS", legacyFilter.Expression, urlPathKey)
+
+	endpointsLegacy := qbtypes.QueryEnvelope{
+		Type: qbtypes.QueryTypeBuilder,
+		Spec: qbtypes.QueryBuilderQuery[qbtypes.TraceAggregation]{
+			Name:         "endpoints_legacy",
+			Signal:       telemetrytypes.SignalTraces,
+			StepInterval: qbtypes.Step{Duration: defaultStepInterval},
+			Aggregations: []qbtypes.TraceAggregation{
+				{Expression: fmt.Sprintf("count_distinct(%s)", urlPathKeyLegacy)},
+			},
+			Filter:  legacyFilter,
+			GroupBy: mergeGroupBy(dualSemconvGroupByKeys["server"], req.GroupBy),
+		},
+	}
+
+	// Formula to combine both counts
+	endpointsFormula := qbtypes.QueryEnvelope{
+		Type: qbtypes.QueryTypeFormula,
+		Spec: qbtypes.QueryBuilderFormula{
+			Name:       "endpoints",
+			Expression: "endpoints_current + endpoints_legacy",
+		},
+	}
+
+	return []qbtypes.QueryEnvelope{endpointsCurrent, endpointsLegacy, endpointsFormula}
+}

 func buildLastSeenQuery(req *thirdpartyapitypes.ThirdPartyApiRequest) qbtypes.QueryEnvelope {
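The shape of the semconv fallback is easier to see stripped of the Go types; a schematic sketch (TypeScript, all names illustrative, not the actual query API):

const base = 'net.peer.name EXISTS'; // stand-in for the request's base filter
const endpointsCurrent = {
	name: 'endpoints_current',
	aggregation: 'count_distinct(url.full)',
	filter: `(${base}) AND url.full EXISTS`,
};
const endpointsLegacy = {
	name: 'endpoints_legacy',
	aggregation: 'count_distinct(http.url)',
	filter: `(${base}) AND url.full NOT EXISTS`,
};
// The two filters are mutually exclusive, so each span is counted exactly once;
// a formula query then adds the partial counts back together.
const endpoints = { name: 'endpoints', expression: 'endpoints_current + endpoints_legacy' };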
@@ -36,29 +36,6 @@ func NewBucketCache(settings factory.ProviderSettings, cache cache.Cache, cacheT
 	}
 }

-// cachedBucket represents a cached time bucket
-type cachedBucket struct {
-	StartMs uint64              `json:"startMs"`
-	EndMs   uint64              `json:"endMs"`
-	Type    qbtypes.RequestType `json:"type"`
-	Value   json.RawMessage     `json:"value"`
-	Stats   qbtypes.ExecStats   `json:"stats"`
-}
-
-// cachedData represents the full cached data for a query
-type cachedData struct {
-	Buckets  []*cachedBucket `json:"buckets"`
-	Warnings []string        `json:"warnings"`
-}
-
-func (c *cachedData) UnmarshalBinary(data []byte) error {
-	return json.Unmarshal(data, c)
-}
-
-func (c *cachedData) MarshalBinary() ([]byte, error) {
-	return json.Marshal(c)
-}
-
 // GetMissRanges returns cached data and missing time ranges
 func (bc *bucketCache) GetMissRanges(
 	ctx context.Context,
@@ -78,7 +55,7 @@ func (bc *bucketCache) GetMissRanges(
 	bc.logger.DebugContext(ctx, "cache key", "cache_key", cacheKey)

 	// Try to get cached data
-	var data cachedData
+	var data qbtypes.CachedData
 	err := bc.cache.Get(ctx, orgID, cacheKey, &data)
 	if err != nil {
 		if !errors.Ast(err, errors.TypeNotFound) {
@@ -147,9 +124,9 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
 	cacheKey := bc.generateCacheKey(q)

 	// Get existing cached data
-	var existingData cachedData
+	var existingData qbtypes.CachedData
 	if err := bc.cache.Get(ctx, orgID, cacheKey, &existingData); err != nil {
-		existingData = cachedData{}
+		existingData = qbtypes.CachedData{}
 	}

 	// Trim the result to exclude data within flux interval
@@ -203,7 +180,7 @@ func (bc *bucketCache) Put(ctx context.Context, orgID valuer.UUID, q qbtypes.Que
 	uniqueWarnings := bc.deduplicateWarnings(allWarnings)

 	// Create updated cached data
-	updatedData := cachedData{
+	updatedData := qbtypes.CachedData{
 		Buckets:  mergedBuckets,
 		Warnings: uniqueWarnings,
 	}
@@ -222,7 +199,7 @@ func (bc *bucketCache) generateCacheKey(q qbtypes.Query) string {
 }

 // findMissingRangesWithStep identifies time ranges not covered by cached buckets with step alignment
-func (bc *bucketCache) findMissingRangesWithStep(buckets []*cachedBucket, startMs, endMs uint64, stepMs uint64) []*qbtypes.TimeRange {
+func (bc *bucketCache) findMissingRangesWithStep(buckets []*qbtypes.CachedBucket, startMs, endMs uint64, stepMs uint64) []*qbtypes.TimeRange {
 	// When step is 0 or window is too small to be cached, use simple algorithm
 	if stepMs == 0 || (startMs+stepMs) > endMs {
 		return bc.findMissingRangesBasic(buckets, startMs, endMs)
@@ -265,7 +242,7 @@ func (bc *bucketCache) findMissingRangesWithStep(buckets []*cachedBucket, startM
 	}

 	if needsSort {
-		slices.SortFunc(buckets, func(a, b *cachedBucket) int {
+		slices.SortFunc(buckets, func(a, b *qbtypes.CachedBucket) int {
 			if a.StartMs < b.StartMs {
 				return -1
 			}
@@ -339,7 +316,7 @@ func (bc *bucketCache) findMissingRangesWithStep(buckets []*cachedBucket, startM
 }

 // findMissingRangesBasic is the simple algorithm without step alignment
-func (bc *bucketCache) findMissingRangesBasic(buckets []*cachedBucket, startMs, endMs uint64) []*qbtypes.TimeRange {
+func (bc *bucketCache) findMissingRangesBasic(buckets []*qbtypes.CachedBucket, startMs, endMs uint64) []*qbtypes.TimeRange {
 	// Check if already sorted before sorting
 	needsSort := false
 	for i := 1; i < len(buckets); i++ {
@@ -350,7 +327,7 @@ func (bc *bucketCache) findMissingRangesBasic(buckets []*cachedBucket, startMs,
 	}

 	if needsSort {
-		slices.SortFunc(buckets, func(a, b *cachedBucket) int {
+		slices.SortFunc(buckets, func(a, b *qbtypes.CachedBucket) int {
 			if a.StartMs < b.StartMs {
 				return -1
 			}
@@ -421,9 +398,9 @@ func (bc *bucketCache) findMissingRangesBasic(buckets []*cachedBucket, startMs,
 }

 // filterRelevantBuckets returns buckets that overlap with the requested time range
-func (bc *bucketCache) filterRelevantBuckets(buckets []*cachedBucket, startMs, endMs uint64) []*cachedBucket {
+func (bc *bucketCache) filterRelevantBuckets(buckets []*qbtypes.CachedBucket, startMs, endMs uint64) []*qbtypes.CachedBucket {
 	// Pre-allocate with estimated capacity
-	relevant := make([]*cachedBucket, 0, len(buckets))
+	relevant := make([]*qbtypes.CachedBucket, 0, len(buckets))

 	for _, bucket := range buckets {
 		// Check if bucket overlaps with requested range
@@ -433,7 +410,7 @@ func (bc *bucketCache) filterRelevantBuckets(buckets []*cachedBucket, startMs, e
 	}

 	// Sort by start time
-	slices.SortFunc(relevant, func(a, b *cachedBucket) int {
+	slices.SortFunc(relevant, func(a, b *qbtypes.CachedBucket) int {
 		if a.StartMs < b.StartMs {
 			return -1
 		}
@@ -447,7 +424,7 @@ func (bc *bucketCache) filterRelevantBuckets(buckets []*cachedBucket, startMs, e
 }

 // mergeBuckets combines multiple cached buckets into a single result
-func (bc *bucketCache) mergeBuckets(ctx context.Context, buckets []*cachedBucket, warnings []string) *qbtypes.Result {
+func (bc *bucketCache) mergeBuckets(ctx context.Context, buckets []*qbtypes.CachedBucket, warnings []string) *qbtypes.Result {
 	if len(buckets) == 0 {
 		return &qbtypes.Result{}
 	}
@@ -480,7 +457,7 @@ func (bc *bucketCache) mergeBuckets(ctx context.Context, buckets []*cachedBucket
 }

 // mergeTimeSeriesValues merges time series data from multiple buckets
-func (bc *bucketCache) mergeTimeSeriesValues(ctx context.Context, buckets []*cachedBucket) *qbtypes.TimeSeriesData {
+func (bc *bucketCache) mergeTimeSeriesValues(ctx context.Context, buckets []*qbtypes.CachedBucket) *qbtypes.TimeSeriesData {
 	// Estimate capacity based on bucket count
 	estimatedSeries := len(buckets) * 10

@@ -631,7 +608,7 @@ func (bc *bucketCache) isEmptyResult(result *qbtypes.Result) (isEmpty bool, isFi
 }

 // resultToBuckets converts a query result into time-based buckets
-func (bc *bucketCache) resultToBuckets(ctx context.Context, result *qbtypes.Result, startMs, endMs uint64) []*cachedBucket {
+func (bc *bucketCache) resultToBuckets(ctx context.Context, result *qbtypes.Result, startMs, endMs uint64) []*qbtypes.CachedBucket {
 	// Check if result is empty
 	isEmpty, isFiltered := bc.isEmptyResult(result)

@@ -652,7 +629,7 @@ func (bc *bucketCache) resultToBuckets(ctx context.Context, result *qbtypes.Resu

 	// Always create a bucket, even for empty filtered results
 	// This ensures we don't re-query for data that doesn't exist
-	return []*cachedBucket{
+	return []*qbtypes.CachedBucket{
 		{
 			StartMs: startMs,
 			EndMs:   endMs,
@@ -664,9 +641,9 @@ func (bc *bucketCache) resultToBuckets(ctx context.Context, result *qbtypes.Resu
 }

 // mergeAndDeduplicateBuckets combines and deduplicates bucket lists
-func (bc *bucketCache) mergeAndDeduplicateBuckets(existing, fresh []*cachedBucket) []*cachedBucket {
+func (bc *bucketCache) mergeAndDeduplicateBuckets(existing, fresh []*qbtypes.CachedBucket) []*qbtypes.CachedBucket {
 	// Create a map to deduplicate by time range
-	bucketMap := make(map[string]*cachedBucket)
+	bucketMap := make(map[string]*qbtypes.CachedBucket)

 	// Add existing buckets
 	for _, bucket := range existing {
@@ -681,13 +658,13 @@ func (bc *bucketCache) mergeAndDeduplicateBuckets(existing, fresh []*cachedBucke
 	}

 	// Convert back to slice with pre-allocated capacity
-	result := make([]*cachedBucket, 0, len(bucketMap))
+	result := make([]*qbtypes.CachedBucket, 0, len(bucketMap))
 	for _, bucket := range bucketMap {
 		result = append(result, bucket)
 	}

 	// Sort by start time
-	slices.SortFunc(result, func(a, b *cachedBucket) int {
+	slices.SortFunc(result, func(a, b *qbtypes.CachedBucket) int {
 		if a.StartMs < b.StartMs {
 			return -1
 		}
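The missing-range computation itself is not shown in this diff; a simplified sketch of the basic (non-step-aligned) idea, assuming sorted, non-overlapping buckets (TypeScript, illustrative only):

interface Bucket { startMs: number; endMs: number; }

function findMissingRangesBasic(buckets: Bucket[], startMs: number, endMs: number): Bucket[] {
	const missing: Bucket[] = [];
	let cursor = startMs;
	for (const b of buckets) {
		// Any gap between the cursor and the next cached bucket must be re-queried
		if (b.startMs > cursor) missing.push({ startMs: cursor, endMs: Math.min(b.startMs, endMs) });
		cursor = Math.max(cursor, b.endMs);
		if (cursor >= endMs) break;
	}
	if (cursor < endMs) missing.push({ startMs: cursor, endMs }); // tail gap after the last bucket
	return missing;
}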
@@ -147,14 +147,14 @@ func BenchmarkBucketCache_MergeTimeSeriesValues(b *testing.B) {
 	for _, tc := range testCases {
 		b.Run(tc.name, func(b *testing.B) {
 			// Create test buckets
-			buckets := make([]*cachedBucket, tc.numBuckets)
+			buckets := make([]*qbtypes.CachedBucket, tc.numBuckets)
 			for i := 0; i < tc.numBuckets; i++ {
 				startMs := uint64(i * 10000)
 				endMs := uint64((i + 1) * 10000)
 				result := createBenchmarkResultWithSeries(startMs, endMs, 1000, tc.numSeries, tc.numValues)

 				valueBytes, _ := json.Marshal(result.Value)
-				buckets[i] = &cachedBucket{
+				buckets[i] = &qbtypes.CachedBucket{
 					StartMs: startMs,
 					EndMs:   endMs,
 					Type:    qbtypes.RequestTypeTimeSeries,
@@ -417,8 +417,8 @@ func createBenchmarkResultWithSeries(startMs, endMs uint64, _ uint64, numSeries,
 }

 // Helper function to create buckets with specific gap patterns
-func createBucketsWithPattern(numBuckets int, pattern string) []*cachedBucket {
-	buckets := make([]*cachedBucket, 0, numBuckets)
+func createBucketsWithPattern(numBuckets int, pattern string) []*qbtypes.CachedBucket {
+	buckets := make([]*qbtypes.CachedBucket, 0, numBuckets)

 	for i := 0; i < numBuckets; i++ {
 		// Skip some buckets based on pattern
@@ -432,7 +432,7 @@ func createBucketsWithPattern(numBuckets int, pattern string) []*cachedBucket {
 		startMs := uint64(i * 10000)
 		endMs := uint64((i + 1) * 10000)

-		buckets = append(buckets, &cachedBucket{
+		buckets = append(buckets, &qbtypes.CachedBucket{
 			StartMs: startMs,
 			EndMs:   endMs,
 			Type:    qbtypes.RequestTypeTimeSeries,
@@ -521,7 +521,7 @@ func TestBucketCache_FindMissingRanges_EdgeCases(t *testing.T) {
 	bc := NewBucketCache(instrumentationtest.New().ToProviderSettings(), memCache, cacheTTL, defaultFluxInterval).(*bucketCache)

 	// Test with buckets that have gaps and overlaps
-	buckets := []*cachedBucket{
+	buckets := []*qbtypes.CachedBucket{
 		{StartMs: 1000, EndMs: 2000},
 		{StartMs: 2500, EndMs: 3500},
 		{StartMs: 3000, EndMs: 4000}, // Overlaps with previous
@@ -1097,7 +1097,7 @@ func TestBucketCache_FindMissingRangesWithStep(t *testing.T) {

 	tests := []struct {
 		name     string
-		buckets  []*cachedBucket
+		buckets  []*qbtypes.CachedBucket
 		startMs  uint64
 		endMs    uint64
 		stepMs   uint64
@@ -1106,7 +1106,7 @@ func TestBucketCache_FindMissingRangesWithStep(t *testing.T) {
 	}{
 		{
 			name:    "start_not_aligned_to_step",
-			buckets: []*cachedBucket{},
+			buckets: []*qbtypes.CachedBucket{},
 			startMs: 1500, // Not aligned to 1000ms step
 			endMs:   5000,
 			stepMs:  1000,
@@ -1118,7 +1118,7 @@ func TestBucketCache_FindMissingRangesWithStep(t *testing.T) {
 		},
 		{
 			name:    "end_not_aligned_to_step",
-			buckets: []*cachedBucket{},
+			buckets: []*qbtypes.CachedBucket{},
 			startMs: 1000,
 			endMs:   4500, // Not aligned to 1000ms step
 			stepMs:  1000,
@@ -1129,7 +1129,7 @@ func TestBucketCache_FindMissingRangesWithStep(t *testing.T) {
 		},
 		{
 			name: "bucket_boundaries_not_aligned",
-			buckets: []*cachedBucket{
+			buckets: []*qbtypes.CachedBucket{
 				{StartMs: 1500, EndMs: 2500}, // Not aligned
 			},
 			startMs: 1000,
@@ -1143,7 +1143,7 @@ func TestBucketCache_FindMissingRangesWithStep(t *testing.T) {
 		},
 		{
 			name:    "small_window_less_than_step",
-			buckets: []*cachedBucket{},
+			buckets: []*qbtypes.CachedBucket{},
 			startMs: 1000,
 			endMs:   1500, // Less than one step
 			stepMs:  1000,
@@ -1154,7 +1154,7 @@ func TestBucketCache_FindMissingRangesWithStep(t *testing.T) {
 		},
 		{
 			name:    "zero_step_uses_basic_algorithm",
-			buckets: []*cachedBucket{},
+			buckets: []*qbtypes.CachedBucket{},
 			startMs: 1000,
 			endMs:   5000,
 			stepMs:  0,
@@ -279,7 +279,7 @@ func (c *conditionBuilder) buildSpanScopeCondition(key *telemetrytypes.Telemetry
 		return "", errors.NewInvalidInputf(errors.CodeInvalidInput, "span scope field %s only supports '=' operator", key.Name)
 	}

-	isTrue := false
+	var isTrue bool
 	switch v := value.(type) {
 	case bool:
 		isTrue = v
@@ -1,6 +1,8 @@
 package querybuildertypesv5

 import (
+	"fmt"
+
 	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
 )

@@ -131,12 +133,44 @@ func (q *QueryBuilderQuery[T]) UnmarshalJSON(data []byte) error {

 	var temp Alias
 	// Use UnmarshalJSONWithContext for better error messages
-	if err := UnmarshalJSONWithContext(data, &temp, "query spec"); err != nil {
+	if err := UnmarshalJSONWithContext(data, &temp, fmt.Sprintf("query spec for %T", q)); err != nil {
 		return err
 	}

 	// Copy the decoded values back to the original struct
 	*q = QueryBuilderQuery[T](temp)
+
+	// Normalize the query after unmarshaling
+	q.Normalize()
 	return nil
 }

+// Normalize normalizes all the field keys in the query
+func (q *QueryBuilderQuery[T]) Normalize() {
+	// normalize select fields
+	for idx := range q.SelectFields {
+		q.SelectFields[idx].Normalize()
+	}
+
+	// normalize group by fields
+	for idx := range q.GroupBy {
+		q.GroupBy[idx].Normalize()
+	}
+
+	// normalize order by fields
+	for idx := range q.Order {
+		q.Order[idx].Key.Normalize()
+	}
+
+	// normalize secondary aggregations
+	for idx := range q.SecondaryAggregations {
+		for jdx := range q.SecondaryAggregations[idx].Order {
+			q.SecondaryAggregations[idx].Order[jdx].Key.Normalize()
+		}
+		for jdx := range q.SecondaryAggregations[idx].GroupBy {
+			q.SecondaryAggregations[idx].GroupBy[jdx].Normalize()
+		}
+	}
+}
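The traversal is the whole trick: every nested field key is normalized exactly once after JSON decoding. The same shape in a compact TypeScript analog (types hypothetical; `normalize` stands in for the Go `Normalize` methods):

interface Normalizable { normalize(): void }
interface QueryLike {
	selectFields?: Normalizable[];
	groupBy?: Normalizable[];
	order?: { key: Normalizable }[];
	secondaryAggregations?: { order?: { key: Normalizable }[]; groupBy?: Normalizable[] }[];
}

function normalizeQuery(q: QueryLike): void {
	q.selectFields?.forEach((f) => f.normalize());
	q.groupBy?.forEach((g) => g.normalize());
	q.order?.forEach((o) => o.key.normalize());
	q.secondaryAggregations?.forEach((s) => {
		s.order?.forEach((o) => o.key.normalize());
		s.groupBy?.forEach((g) => g.normalize());
	});
}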
@@ -0,0 +1,653 @@
|
||||
package querybuildertypesv5
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/SigNoz/signoz/pkg/types/metrictypes"
|
||||
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestQueryBuilderQuery_Copy(t *testing.T) {
|
||||
t.Run("copy with all fields populated", func(t *testing.T) {
|
||||
original := QueryBuilderQuery[TraceAggregation]{
|
||||
Name: "A",
|
||||
StepInterval: Step{Duration: 60 * time.Second},
|
||||
Signal: telemetrytypes.SignalTraces,
|
||||
Source: telemetrytypes.SourceUnspecified,
|
||||
Aggregations: []TraceAggregation{
|
||||
{
|
||||
Expression: "count()",
|
||||
Alias: "trace_count",
|
||||
},
|
||||
},
|
||||
Disabled: false,
|
||||
Filter: &Filter{
|
||||
Expression: "service.name = 'frontend'",
|
||||
},
|
||||
GroupBy: []GroupByKey{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "service.name",
|
||||
FieldContext: telemetrytypes.FieldContextResource,
|
||||
},
|
||||
},
|
||||
},
|
||||
Order: []OrderBy{
|
||||
{
|
||||
Key: OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "timestamp",
|
||||
FieldContext: telemetrytypes.FieldContextSpan,
|
||||
},
|
||||
},
|
||||
Direction: OrderDirectionDesc,
|
||||
},
|
||||
},
|
||||
SelectFields: []telemetrytypes.TelemetryFieldKey{
|
||||
{
|
||||
Name: "trace_id",
|
||||
FieldContext: telemetrytypes.FieldContextSpan,
|
||||
},
|
||||
},
|
||||
Limit: 100,
|
||||
Offset: 0,
|
||||
Cursor: "cursor123",
|
||||
LimitBy: &LimitBy{
|
||||
Value: "10",
|
||||
Keys: []string{
|
||||
"service.name",
|
||||
},
|
||||
},
|
||||
Having: &Having{
|
||||
Expression: "count() > 100",
|
||||
},
|
||||
SecondaryAggregations: []SecondaryAggregation{
|
||||
{
|
||||
Limit: 10,
|
||||
Order: []OrderBy{
|
||||
{
|
||||
Key: OrderByKey{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "value",
|
||||
FieldContext: telemetrytypes.FieldContextSpan,
|
||||
},
|
||||
},
|
||||
Direction: OrderDirectionAsc,
|
||||
},
|
||||
},
|
||||
GroupBy: []GroupByKey{
|
||||
{
|
||||
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
|
||||
Name: "region",
|
||||
FieldContext: telemetrytypes.FieldContextResource,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Functions: []Function{
|
||||
{
|
||||
Name: FunctionNameTimeShift,
|
||||
Args: []FunctionArg{
|
||||
{
|
||||
Name: "shift",
|
||||
Value: "1h",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Legend: "{{service.name}}",
|
||||
ShiftBy: 3600000,
|
||||
}
|
||||
|
||||
// Create a copy
|
||||
		copied := original.Copy()

		// Assert that values are equal
		assert.Equal(t, original.Name, copied.Name)
		assert.Equal(t, original.StepInterval, copied.StepInterval)
		assert.Equal(t, original.Signal, copied.Signal)
		assert.Equal(t, original.Source, copied.Source)
		assert.Equal(t, original.Disabled, copied.Disabled)
		assert.Equal(t, original.Limit, copied.Limit)
		assert.Equal(t, original.Offset, copied.Offset)
		assert.Equal(t, original.Cursor, copied.Cursor)
		assert.Equal(t, original.Legend, copied.Legend)
		assert.Equal(t, original.ShiftBy, copied.ShiftBy)

		// Assert deep copies for slices and pointers
		require.NotNil(t, copied.Aggregations)
		assert.Equal(t, len(original.Aggregations), len(copied.Aggregations))
		assert.Equal(t, original.Aggregations[0].Expression, copied.Aggregations[0].Expression)

		require.NotNil(t, copied.Filter)
		assert.Equal(t, original.Filter.Expression, copied.Filter.Expression)

		require.NotNil(t, copied.GroupBy)
		assert.Equal(t, len(original.GroupBy), len(copied.GroupBy))
		assert.Equal(t, original.GroupBy[0].Name, copied.GroupBy[0].Name)

		require.NotNil(t, copied.Order)
		assert.Equal(t, len(original.Order), len(copied.Order))
		assert.Equal(t, original.Order[0].Key.Name, copied.Order[0].Key.Name)

		require.NotNil(t, copied.SelectFields)
		assert.Equal(t, len(original.SelectFields), len(copied.SelectFields))
		assert.Equal(t, original.SelectFields[0].Name, copied.SelectFields[0].Name)

		require.NotNil(t, copied.LimitBy)
		assert.Equal(t, original.LimitBy.Value, copied.LimitBy.Value)
		assert.Equal(t, len(original.LimitBy.Keys), len(copied.LimitBy.Keys))

		require.NotNil(t, copied.Having)
		assert.Equal(t, original.Having.Expression, copied.Having.Expression)

		require.NotNil(t, copied.SecondaryAggregations)
		assert.Equal(t, len(original.SecondaryAggregations), len(copied.SecondaryAggregations))
		assert.Equal(t, original.SecondaryAggregations[0].Limit, copied.SecondaryAggregations[0].Limit)

		require.NotNil(t, copied.Functions)
		assert.Equal(t, len(original.Functions), len(copied.Functions))
		assert.Equal(t, original.Functions[0].Name, copied.Functions[0].Name)

		// Verify independence - modify copied and ensure original is unchanged
		copied.Name = "B"
		assert.Equal(t, "A", original.Name)

		copied.Aggregations[0].Expression = "sum()"
		assert.Equal(t, "count()", original.Aggregations[0].Expression)

		copied.Filter.Expression = "modified"
		assert.Equal(t, "service.name = 'frontend'", original.Filter.Expression)

		copied.GroupBy[0].Name = "modified"
		assert.Equal(t, "service.name", original.GroupBy[0].Name)

		copied.Order[0].Key.Name = "modified"
		assert.Equal(t, "timestamp", original.Order[0].Key.Name)

		copied.SelectFields[0].Name = "modified"
		assert.Equal(t, "trace_id", original.SelectFields[0].Name)

		copied.LimitBy.Value = "999"
		assert.Equal(t, "10", original.LimitBy.Value)

		copied.Having.Expression = "modified"
		assert.Equal(t, "count() > 100", original.Having.Expression)

		copied.SecondaryAggregations[0].Limit = 999
		assert.Equal(t, 10, original.SecondaryAggregations[0].Limit)

		copied.Functions[0].Name = FunctionNameAbsolute
		assert.Equal(t, FunctionNameTimeShift, original.Functions[0].Name)
	})

	t.Run("copy with nil fields", func(t *testing.T) {
		original := QueryBuilderQuery[TraceAggregation]{
			Name:   "A",
			Signal: telemetrytypes.SignalTraces,
		}

		copied := original.Copy()

		assert.Equal(t, original.Name, copied.Name)
		assert.Equal(t, original.Signal, copied.Signal)
		assert.Nil(t, copied.Aggregations)
		assert.Nil(t, copied.Filter)
		assert.Nil(t, copied.GroupBy)
		assert.Nil(t, copied.Order)
		assert.Nil(t, copied.SelectFields)
		assert.Nil(t, copied.LimitBy)
		assert.Nil(t, copied.Having)
		assert.Nil(t, copied.SecondaryAggregations)
		assert.Nil(t, copied.Functions)
	})

	t.Run("copy metric aggregation query", func(t *testing.T) {
		original := QueryBuilderQuery[MetricAggregation]{
			Name:   "M",
			Signal: telemetrytypes.SignalMetrics,
			Aggregations: []MetricAggregation{
				{
					MetricName:       "cpu_usage",
					SpaceAggregation: metrictypes.SpaceAggregationAvg,
					TimeAggregation:  metrictypes.TimeAggregationAvg,
				},
			},
		}

		copied := original.Copy()

		assert.Equal(t, original.Name, copied.Name)
		assert.Equal(t, original.Signal, copied.Signal)
		require.NotNil(t, copied.Aggregations)
		assert.Equal(t, original.Aggregations[0].MetricName, copied.Aggregations[0].MetricName)
		assert.Equal(t, original.Aggregations[0].SpaceAggregation, copied.Aggregations[0].SpaceAggregation)

		// Verify independence
		copied.Aggregations[0].MetricName = "modified"
		assert.Equal(t, "cpu_usage", original.Aggregations[0].MetricName)
	})

	t.Run("copy log aggregation query", func(t *testing.T) {
		original := QueryBuilderQuery[LogAggregation]{
			Name:   "L",
			Signal: telemetrytypes.SignalLogs,
			Aggregations: []LogAggregation{
				{
					Expression: "count()",
					Alias:      "log_count",
				},
			},
		}

		copied := original.Copy()

		assert.Equal(t, original.Name, copied.Name)
		assert.Equal(t, original.Signal, copied.Signal)
		require.NotNil(t, copied.Aggregations)
		assert.Equal(t, original.Aggregations[0].Expression, copied.Aggregations[0].Expression)

		// Verify independence
		copied.Aggregations[0].Expression = "sum()"
		assert.Equal(t, "count()", original.Aggregations[0].Expression)
	})
}

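Copy itself is defined elsewhere in this change set; the assertions above pin down its contract. A minimal sketch of the deep-copy shape they imply (illustrative only, not the repo's implementation; the method name is hypothetical):

func (q QueryBuilderQuery[T]) copySketch() QueryBuilderQuery[T] {
	c := q // value copy covers the scalar fields (Name, Limit, Offset, Cursor, ...)
	if q.Aggregations != nil {
		c.Aggregations = make([]T, len(q.Aggregations))
		copy(c.Aggregations, q.Aggregations)
	}
	if q.Filter != nil {
		f := *q.Filter // clone the pointee so mutating the copy cannot reach the original
		c.Filter = &f
	}
	// ...and likewise for GroupBy, Order, SelectFields, LimitBy, Having,
	// SecondaryAggregations, and Functions, as the independence checks require.
	return c
}
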
func TestQueryBuilderQuery_Normalize(t *testing.T) {
	t.Run("normalize select fields", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			SelectFields: []telemetrytypes.TelemetryFieldKey{
				{
					Name:         "service.name",
					FieldContext: telemetrytypes.FieldContextResource,
				},
				{
					Name:         "span.name",
					FieldContext: telemetrytypes.FieldContextSpan,
				},
			},
		}

		query.Normalize()

		// Normalize only changes FieldContext, not the Name
		assert.Equal(t, "service.name", query.SelectFields[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.SelectFields[0].FieldContext)
		assert.Equal(t, "span.name", query.SelectFields[1].Name)
		assert.Equal(t, telemetrytypes.FieldContextSpan, query.SelectFields[1].FieldContext)
	})

	t.Run("normalize group by fields", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			GroupBy: []GroupByKey{
				{
					TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
						Name:         "service.name",
						FieldContext: telemetrytypes.FieldContextResource,
					},
				},
			},
		}

		query.Normalize()

		assert.Equal(t, "service.name", query.GroupBy[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.GroupBy[0].FieldContext)
	})

	t.Run("normalize order by fields", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			Order: []OrderBy{
				{
					Key: OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name:         "timestamp",
							FieldContext: telemetrytypes.FieldContextSpan,
						},
					},
					Direction: OrderDirectionDesc,
				},
			},
		}

		query.Normalize()

		assert.Equal(t, "timestamp", query.Order[0].Key.Name)
		assert.Equal(t, telemetrytypes.FieldContextSpan, query.Order[0].Key.FieldContext)
	})

	t.Run("normalize secondary aggregations", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			SecondaryAggregations: []SecondaryAggregation{
				{
					Order: []OrderBy{
						{
							Key: OrderByKey{
								TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
									Name:         "value",
									FieldContext: telemetrytypes.FieldContextSpan,
								},
							},
							Direction: OrderDirectionAsc,
						},
					},
					GroupBy: []GroupByKey{
						{
							TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
								Name:         "region",
								FieldContext: telemetrytypes.FieldContextResource,
							},
						},
					},
				},
			},
		}

		query.Normalize()

		assert.Equal(t, "value", query.SecondaryAggregations[0].Order[0].Key.Name)
		assert.Equal(t, telemetrytypes.FieldContextSpan, query.SecondaryAggregations[0].Order[0].Key.FieldContext)
		assert.Equal(t, "region", query.SecondaryAggregations[0].GroupBy[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.SecondaryAggregations[0].GroupBy[0].FieldContext)
	})

	t.Run("normalize with nil fields", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			Name:   "A",
			Signal: telemetrytypes.SignalTraces,
		}

		// Should not panic
		query.Normalize()

		assert.Equal(t, "A", query.Name)
	})

	t.Run("normalize all fields together", func(t *testing.T) {
		query := QueryBuilderQuery[TraceAggregation]{
			SelectFields: []telemetrytypes.TelemetryFieldKey{
				{Name: "service.name", FieldContext: telemetrytypes.FieldContextResource},
			},
			GroupBy: []GroupByKey{
				{
					TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
						Name:         "host.name",
						FieldContext: telemetrytypes.FieldContextResource,
					},
				},
			},
			Order: []OrderBy{
				{
					Key: OrderByKey{
						TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
							Name:         "duration",
							FieldContext: telemetrytypes.FieldContextSpan,
						},
					},
				},
			},
			SecondaryAggregations: []SecondaryAggregation{
				{
					Order: []OrderBy{
						{
							Key: OrderByKey{
								TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
									Name:         "count",
									FieldContext: telemetrytypes.FieldContextSpan,
								},
							},
						},
					},
					GroupBy: []GroupByKey{
						{
							TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
								Name:         "status.code",
								FieldContext: telemetrytypes.FieldContextSpan,
							},
						},
					},
				},
			},
		}

		query.Normalize()

		assert.Equal(t, "service.name", query.SelectFields[0].Name)
		assert.Equal(t, "host.name", query.GroupBy[0].Name)
		assert.Equal(t, "duration", query.Order[0].Key.Name)
		assert.Equal(t, "count", query.SecondaryAggregations[0].Order[0].Key.Name)
		assert.Equal(t, "status.code", query.SecondaryAggregations[0].GroupBy[0].Name)
	})
}

func TestQueryBuilderQuery_UnmarshalJSON(t *testing.T) {
	t.Run("valid trace query", func(t *testing.T) {
		jsonData := `{
			"name": "A",
			"signal": "traces",
			"stepInterval": 60,
			"aggregations": [{
				"expression": "count()",
				"alias": "trace_count"
			}],
			"filter": {
				"expression": "service.name = 'frontend'"
			},
			"groupBy": [{
				"name": "service.name",
				"fieldContext": "resource"
			}],
			"order": [{
				"key": {
					"name": "service.name",
					"fieldContext": "resource"
				},
				"direction": "desc"
			}],
			"limit": 100
		}`

		var query QueryBuilderQuery[TraceAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		require.NoError(t, err)

		assert.Equal(t, "A", query.Name)
		assert.Equal(t, telemetrytypes.SignalTraces, query.Signal)
		assert.Equal(t, int64(60000), query.StepInterval.Milliseconds())
		assert.Equal(t, 1, len(query.Aggregations))
		assert.Equal(t, "count()", query.Aggregations[0].Expression)
		assert.Equal(t, "trace_count", query.Aggregations[0].Alias)
		require.NotNil(t, query.Filter)
		assert.Equal(t, "service.name = 'frontend'", query.Filter.Expression)
		assert.Equal(t, 1, len(query.GroupBy))
		assert.Equal(t, "service.name", query.GroupBy[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.GroupBy[0].FieldContext)
		assert.Equal(t, 1, len(query.Order))
		assert.Equal(t, "service.name", query.Order[0].Key.Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.Order[0].Key.FieldContext)
		assert.Equal(t, OrderDirectionDesc, query.Order[0].Direction)
		assert.Equal(t, 100, query.Limit)
	})

	t.Run("valid metric query", func(t *testing.T) {
		jsonData := `{
			"name": "M",
			"signal": "metrics",
			"stepInterval": "1m",
			"aggregations": [{
				"metricName": "cpu_usage",
				"spaceAggregation": "avg",
				"timeAggregation": "avg"
			}]
		}`

		var query QueryBuilderQuery[MetricAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		require.NoError(t, err)

		assert.Equal(t, "M", query.Name)
		assert.Equal(t, telemetrytypes.SignalMetrics, query.Signal)
		assert.Equal(t, int64(60000), query.StepInterval.Milliseconds())
		assert.Equal(t, 1, len(query.Aggregations))
		assert.Equal(t, "cpu_usage", query.Aggregations[0].MetricName)
	})

	t.Run("valid log query", func(t *testing.T) {
		jsonData := `{
			"name": "L",
			"signal": "logs",
			"aggregations": [{
				"expression": "count()",
				"alias": "log_count"
			}]
		}`

		var query QueryBuilderQuery[LogAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		require.NoError(t, err)

		assert.Equal(t, "L", query.Name)
		assert.Equal(t, telemetrytypes.SignalLogs, query.Signal)
		assert.Equal(t, 1, len(query.Aggregations))
		assert.Equal(t, "count()", query.Aggregations[0].Expression)
	})

	t.Run("unknown field error", func(t *testing.T) {
		jsonData := `{
			"name": "A",
			"signal": "traces",
			"unknownField": "value"
		}`

		var query QueryBuilderQuery[TraceAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		assert.Error(t, err)
		assert.Contains(t, err.Error(), "unknown field")
	})

	t.Run("query with all optional fields", func(t *testing.T) {
		// NOTE: This json payload is not realistic, just for testing all fields
		jsonData := `{
			"name": "A",
			"signal": "traces",
			"stepInterval": "5m",
			"source": "traces",
			"aggregations": [{
				"expression": "count()",
				"alias": "span.count"
			}],
			"disabled": true,
			"filter": {
				"expression": "service.name = 'api'"
			},
			"groupBy": [{
				"name": "service.name",
				"fieldContext": "resource"
			}],
			"order": [{
				"key": {
					"name": "timestamp",
					"fieldContext": "span"
				},
				"direction": "asc"
			}],
			"selectFields": [{
				"name": "trace_id",
				"fieldContext": "span"
			}],
			"limit": 50,
			"offset": 10,
			"cursor": "cursor123",
			"limitBy": {
				"value": "5",
				"keys": ["service.name"]
			},
			"having": {
				"expression": "count() > 10"
			},
			"secondaryAggregations": [{
				"limit": 20,
				"order": [{
					"key": {
						"name": "value",
						"fieldContext": "span"
					},
					"direction": "desc"
				}],
				"groupBy": [{
					"name": "region",
					"fieldContext": "resource"
				}]
			}],
			"functions": [{
				"name": "timeShift",
				"args": [{
					"name": "shift",
					"value": "1h"
				}]
			}],
			"legend": "{{service.name}}"
		}`

		var query QueryBuilderQuery[TraceAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		require.NoError(t, err)

		assert.Equal(t, "A", query.Name)
		assert.Equal(t, telemetrytypes.SignalTraces, query.Signal)
		assert.Equal(t, int64(300000), query.StepInterval.Milliseconds())
		// Source is set in the JSON, so it should be "traces", not SourceUnspecified
		assert.Equal(t, "traces", query.Source.String.StringValue())
		assert.True(t, query.Disabled)
		assert.Equal(t, query.Aggregations[0].Expression, "count()")
		assert.Equal(t, query.Aggregations[0].Alias, "span.count")
		assert.NotNil(t, query.Filter)
		assert.NotNil(t, query.GroupBy)
		assert.NotNil(t, query.Order)
		assert.NotNil(t, query.SelectFields)
		assert.Equal(t, 50, query.Limit)
		assert.Equal(t, 10, query.Offset)
		assert.Equal(t, "cursor123", query.Cursor)
		assert.NotNil(t, query.LimitBy)
		assert.NotNil(t, query.Having)
		assert.NotNil(t, query.SecondaryAggregations)
		assert.NotNil(t, query.Functions)
		assert.Equal(t, "{{service.name}}", query.Legend)
	})

	t.Run("normalization happens during unmarshaling", func(t *testing.T) {
		jsonData := `{
			"name": "A",
			"signal": "traces",
			"selectFields": [{
				"name": "resource.service.name"
			}],
			"groupBy": [{
				"name": "resource.host.name"
			}],
			"order": [{
				"key": {
					"name": "span.duration"
				},
				"direction": "desc"
			}]
		}`

		var query QueryBuilderQuery[TraceAggregation]
		err := json.Unmarshal([]byte(jsonData), &query)
		require.NoError(t, err)

		// The context prefix is parsed out of the name and moved into FieldContext
		assert.Equal(t, "service.name", query.SelectFields[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.SelectFields[0].FieldContext)
		assert.Equal(t, "host.name", query.GroupBy[0].Name)
		assert.Equal(t, telemetrytypes.FieldContextResource, query.GroupBy[0].FieldContext)
		assert.Equal(t, "duration", query.Order[0].Key.Name)
		assert.Equal(t, telemetrytypes.FieldContextSpan, query.Order[0].Key.FieldContext)
	})
}
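The assertions on StepInterval.Milliseconds() above accept both a bare number (60, read as seconds) and a duration string ("1m"). The repo's step type is not shown in this diff; a minimal sketch of an unmarshaler with that behavior (type name and embedding are assumptions, imports: encoding/json, time):

type Step struct{ time.Duration } // hypothetical stand-in for the repo's step type

func (s *Step) UnmarshalJSON(b []byte) error {
	// A bare JSON number is interpreted as seconds.
	var secs float64
	if err := json.Unmarshal(b, &secs); err == nil {
		s.Duration = time.Duration(secs * float64(time.Second))
		return nil
	}
	// Otherwise expect a Go-style duration string such as "1m" or "5m".
	var text string
	if err := json.Unmarshal(b, &text); err != nil {
		return err
	}
	d, err := time.ParseDuration(text)
	if err != nil {
		return err
	}
	s.Duration = d
	return nil
}
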
61
pkg/types/querybuildertypes/querybuildertypesv5/cached.go
Normal file
@@ -0,0 +1,61 @@
package querybuildertypesv5

import (
	"bytes"
	"encoding/json"
	"maps"

	"github.com/SigNoz/signoz/pkg/types/cachetypes"
)

var _ cachetypes.Cacheable = (*CachedData)(nil)

type CachedBucket struct {
	StartMs uint64          `json:"startMs"`
	EndMs   uint64          `json:"endMs"`
	Type    RequestType     `json:"type"`
	Value   json.RawMessage `json:"value"`
	Stats   ExecStats       `json:"stats"`
}

func (c *CachedBucket) Clone() *CachedBucket {
	return &CachedBucket{
		StartMs: c.StartMs,
		EndMs:   c.EndMs,
		Type:    c.Type,
		Value:   bytes.Clone(c.Value),
		Stats: ExecStats{
			RowsScanned:   c.Stats.RowsScanned,
			BytesScanned:  c.Stats.BytesScanned,
			DurationMS:    c.Stats.DurationMS,
			StepIntervals: maps.Clone(c.Stats.StepIntervals),
		},
	}
}

// CachedData represents the full cached data for a query
type CachedData struct {
	Buckets  []*CachedBucket `json:"buckets"`
	Warnings []string        `json:"warnings"`
}

func (c *CachedData) UnmarshalBinary(data []byte) error {
	return json.Unmarshal(data, c)
}

func (c *CachedData) MarshalBinary() ([]byte, error) {
	return json.Marshal(c)
}

func (c *CachedData) Clone() cachetypes.Cacheable {
	clonedCachedData := new(CachedData)
	clonedCachedData.Buckets = make([]*CachedBucket, len(c.Buckets))
	for i := range c.Buckets {
		clonedCachedData.Buckets[i] = c.Buckets[i].Clone()
	}

	clonedCachedData.Warnings = make([]string, len(c.Warnings))
	copy(clonedCachedData.Warnings, c.Warnings)

	return clonedCachedData
}
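A note on the Clone above: json.RawMessage is just a []byte, so copying the struct by value would still share the slice's backing array; bytes.Clone and maps.Clone give each clone independent storage. A small illustration (assumed usage, not repo code):

orig := CachedBucket{Value: json.RawMessage(`{"a":1}`)}
shallow := orig        // struct copy: shallow.Value still aliases orig.Value
cloned := orig.Clone() // deep copy: cloned.Value has its own backing array

shallow.Value[1] = 'x' // this write is visible through orig.Value as well
_ = cloned             // ...but cloned.Value is unaffected
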
@@ -0,0 +1,87 @@
package querybuildertypesv5

import (
	"encoding/json"
	"testing"

	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/stretchr/testify/assert"
)

func createBuckets_TimeSeries(numBuckets int) []*CachedBucket {
	buckets := make([]*CachedBucket, numBuckets)

	for i := 0; i < numBuckets; i++ {
		startMs := uint64(i * 10000)
		endMs := uint64((i + 1) * 10000)

		timeSeriesData := &TimeSeriesData{
			QueryName: "A",
			Aggregations: []*AggregationBucket{
				{
					Index: 0,
					Series: []*TimeSeries{
						{
							Labels: []*Label{
								{Key: telemetrytypes.TelemetryFieldKey{Name: "service"}, Value: "test"},
							},
							Values: []*TimeSeriesValue{
								{Timestamp: 1672563720000, Value: 1, Partial: true}, // 12:02
								{Timestamp: 1672563900000, Value: 2},                // 12:05
								{Timestamp: 1672564200000, Value: 2.5},              // 12:10
								{Timestamp: 1672564500000, Value: 2.6},              // 12:15
								{Timestamp: 1672566600000, Value: 2.9},              // 12:50
								{Timestamp: 1672566900000, Value: 3},                // 12:55
								{Timestamp: 1672567080000, Value: 4, Partial: true}, // 12:58
							},
						},
					},
				},
			},
		}

		value, err := json.Marshal(timeSeriesData)
		if err != nil {
			panic(err)
		}

		buckets[i] = &CachedBucket{
			StartMs: startMs,
			EndMs:   endMs,
			Type:    RequestTypeTimeSeries,
			Value:   json.RawMessage(value),
			Stats: ExecStats{
				RowsScanned:  uint64(i * 500),
				BytesScanned: uint64(i * 10000),
				DurationMS:   uint64(i * 1000),
			},
		}
	}

	return buckets
}

func BenchmarkCachedData_JSONMarshal_10kbuckets(b *testing.B) {
	buckets := createBuckets_TimeSeries(10000)
	data := &CachedData{Buckets: buckets}

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		_, err := json.Marshal(data)
		assert.NoError(b, err)
	}
}

func BenchmarkCachedData_Clone_10kbuckets(b *testing.B) {
	buckets := createBuckets_TimeSeries(10000)
	data := &CachedData{Buckets: buckets}

	b.ReportAllocs()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		_ = data.Clone()
	}
}
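These benchmarks compare Clone against a full JSON marshal of the same 10k-bucket payload. They run with the standard Go tooling, for example:

go test -bench CachedData -benchmem ./pkg/types/querybuildertypes/querybuildertypesv5/

-benchmem reports per-operation allocations, which is presumably the number the Clone implementation is trying to keep down relative to a marshal round trip.
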
@@ -47,19 +47,19 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {
	switch header.Signal {
	case telemetrytypes.SignalTraces:
		var spec QueryBuilderQuery[TraceAggregation]
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
		if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
			return wrapUnmarshalError(err, "invalid trace builder query spec: %v", err)
		}
		q.Spec = spec
	case telemetrytypes.SignalLogs:
		var spec QueryBuilderQuery[LogAggregation]
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
		if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
			return wrapUnmarshalError(err, "invalid log builder query spec: %v", err)
		}
		q.Spec = spec
	case telemetrytypes.SignalMetrics:
		var spec QueryBuilderQuery[MetricAggregation]
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "query spec"); err != nil {
		if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
			return wrapUnmarshalError(err, "invalid metric builder query spec: %v", err)
		}
		q.Spec = spec
@@ -75,6 +75,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {

	case QueryTypeFormula:
		var spec QueryBuilderFormula
		// TODO: use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderFormula
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "formula spec"); err != nil {
			return wrapUnmarshalError(err, "invalid formula spec: %v", err)
		}
@@ -82,6 +83,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {

	case QueryTypeJoin:
		var spec QueryBuilderJoin
		// TODO: use json.Unmarshal here after implementing custom unmarshaler for QueryBuilderJoin
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "join spec"); err != nil {
			return wrapUnmarshalError(err, "invalid join spec: %v", err)
		}
@@ -89,13 +91,14 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {

	case QueryTypeTraceOperator:
		var spec QueryBuilderTraceOperator
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "trace operator spec"); err != nil {
		if err := json.Unmarshal(shadow.Spec, &spec); err != nil {
			return wrapUnmarshalError(err, "invalid trace operator spec: %v", err)
		}
		q.Spec = spec

	case QueryTypePromQL:
		var spec PromQuery
		// TODO: use json.Unmarshal here after implementing custom unmarshaler for PromQuery
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "PromQL spec"); err != nil {
			return wrapUnmarshalError(err, "invalid PromQL spec: %v", err)
		}
@@ -103,6 +106,7 @@ func (q *QueryEnvelope) UnmarshalJSON(data []byte) error {

	case QueryTypeClickHouseSQL:
		var spec ClickHouseQuery
		// TODO: use json.Unmarshal here after implementing custom unmarshaler for ClickHouseQuery
		if err := UnmarshalJSONWithContext(shadow.Spec, &spec, "ClickHouse SQL spec"); err != nil {
			return wrapUnmarshalError(err, "invalid ClickHouse SQL spec: %v", err)
		}

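UnmarshalJSONWithContext itself is not part of this hunk. Judging by the "unknown field" assertions in the tests above, it appears to wrap a strict decoder; a minimal sketch of that shape (the function name and wrapping are assumptions, not the repo's actual helper; imports: bytes, encoding/json, fmt):

func unmarshalStrict(data []byte, v any, what string) error { // hypothetical helper
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields() // produces `json: unknown field "..."` errors
	if err := dec.Decode(v); err != nil {
		return fmt.Errorf("%s: %w", what, err) // attach the context string
	}
	return nil
}
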
@@ -366,6 +366,45 @@ func (q QueryBuilderTraceOperator) Copy() QueryBuilderTraceOperator {
	return c
}

// UnmarshalJSON implements custom JSON unmarshaling to disallow unknown fields
func (q *QueryBuilderTraceOperator) UnmarshalJSON(data []byte) error {
	// Define a type alias to avoid infinite recursion
	type Alias QueryBuilderTraceOperator

	var temp Alias
	// Use UnmarshalJSONWithContext for better error messages
	if err := UnmarshalJSONWithContext(data, &temp, "query spec"); err != nil {
		return err
	}

	// Copy the decoded values back to the original struct
	*q = QueryBuilderTraceOperator(temp)

	// Normalize the query after unmarshaling
	q.Normalize()
	return nil
}

// Normalize normalizes all the field keys in the query
func (q *QueryBuilderTraceOperator) Normalize() {

	// normalize select fields
	for idx := range q.SelectFields {
		q.SelectFields[idx].Normalize()
	}

	// normalize group by fields
	for idx := range q.GroupBy {
		q.GroupBy[idx].Normalize()
	}

	// normalize order by fields
	for idx := range q.Order {
		q.Order[idx].Key.Normalize()
	}

}

// ValidateUniqueTraceOperator ensures only one trace operator exists in queries
func ValidateUniqueTraceOperator(queries []QueryEnvelope) error {
	traceOperatorCount := 0

@@ -51,63 +51,118 @@ func (f TelemetryFieldKey) String() string {
	return sb.String()
}

// GetFieldKeyFromKeyText returns a TelemetryFieldKey from a key text.
// The key text is expected to be in the format of `fieldContext.fieldName:fieldDataType` in the search query.
func GetFieldKeyFromKeyText(key string) TelemetryFieldKey {
func (f TelemetryFieldKey) Text() string {
	return TelemetryFieldKeyToText(&f)
}

	keyTextParts := strings.Split(key, ".")
// Normalize parses and normalizes a TelemetryFieldKey by extracting
// the field context and data type from the field name if they are not already specified.
// This function modifies the key in place.
//
// Example:
//
//	key := &TelemetryFieldKey{Name: "resource.service.name:string"}
//	key.Normalize()
//	// Result: Name: "service.name", FieldContext: FieldContextResource, FieldDataType: FieldDataTypeString
func (f *TelemetryFieldKey) Normalize() {

	var explicitFieldContextProvided, explicitFieldDataTypeProvided bool
	var explicitFieldContext FieldContext
	var explicitFieldDataType FieldDataType
	var ok bool

	if len(keyTextParts) > 1 {
		explicitFieldContext, ok = fieldContexts[keyTextParts[0]]
		if ok && explicitFieldContext != FieldContextUnspecified {
			explicitFieldContextProvided = true
		}
	}

	if explicitFieldContextProvided {
		keyTextParts = keyTextParts[1:]
	}

	// check if there is a field data type provided
	if len(keyTextParts) >= 1 {
		lastPart := keyTextParts[len(keyTextParts)-1]
		lastPartParts := strings.Split(lastPart, ":")
		if len(lastPartParts) > 1 {
			explicitFieldDataType, ok = fieldDataTypes[lastPartParts[1]]
			if ok && explicitFieldDataType != FieldDataTypeUnspecified {
				explicitFieldDataTypeProvided = true
	// Step 1: Parse data type from the right (after the last ":") if not already specified
	if f.FieldDataType == FieldDataTypeUnspecified {
		if colonIdx := strings.LastIndex(f.Name, ":"); colonIdx != -1 {
			potentialDataType := f.Name[colonIdx+1:]
			if dt, ok := fieldDataTypes[potentialDataType]; ok && dt != FieldDataTypeUnspecified {
				f.FieldDataType = dt
				f.Name = f.Name[:colonIdx]
			}
		}
	}

	if explicitFieldDataTypeProvided {
		keyTextParts[len(keyTextParts)-1] = lastPartParts[0]
	// Step 2: Parse field context from the left if not already specified
	if f.FieldContext == FieldContextUnspecified {
		if dotIdx := strings.Index(f.Name, "."); dotIdx != -1 {
			potentialContext := f.Name[:dotIdx]
			if fc, ok := fieldContexts[potentialContext]; ok && fc != FieldContextUnspecified {
				f.Name = f.Name[dotIdx+1:]
				f.FieldContext = fc

				// Step 2a: Handle special case for log.body.* fields
				if f.FieldContext == FieldContextLog && strings.HasPrefix(f.Name, BodyJSONStringSearchPrefix) {
					f.FieldContext = FieldContextBody
					f.Name = strings.TrimPrefix(f.Name, BodyJSONStringSearchPrefix)
				}

			}
		}
	}

	realKey := strings.Join(keyTextParts, ".")
}

	fieldKeySelector := TelemetryFieldKey{
		Name: realKey,
	}
// GetFieldKeyFromKeyText returns a TelemetryFieldKey from a key text.
// The key text is expected to be in the format of `fieldContext.fieldName:fieldDataType` in the search query.
// Both fieldContext and :fieldDataType are optional.
// fieldName can contain dots and can start with a dot (e.g., ".http_code").
// Special cases:
// - When key exactly matches a field context name (e.g., "body", "attribute"), use unspecified context
// - When key starts with "body." prefix, use "body" as context with remainder as field name
func GetFieldKeyFromKeyText(key string) TelemetryFieldKey {
	var explicitFieldDataType FieldDataType = FieldDataTypeUnspecified
	var fieldName string

	if explicitFieldContextProvided {
		fieldKeySelector.FieldContext = explicitFieldContext
	// Step 1: Parse data type from the right (after the last ":")
	var keyWithoutDataType string
	if colonIdx := strings.LastIndex(key, ":"); colonIdx != -1 {
		potentialDataType := key[colonIdx+1:]
		if dt, ok := fieldDataTypes[potentialDataType]; ok && dt != FieldDataTypeUnspecified {
			explicitFieldDataType = dt
			keyWithoutDataType = key[:colonIdx]
		} else {
			// No valid data type found, treat the entire key as the field name
			keyWithoutDataType = key
		}
	} else {
		fieldKeySelector.FieldContext = FieldContextUnspecified
		keyWithoutDataType = key
	}

	if explicitFieldDataTypeProvided {
		fieldKeySelector.FieldDataType = explicitFieldDataType
	} else {
		fieldKeySelector.FieldDataType = FieldDataTypeUnspecified
	// Step 2: Parse field context from the left
	if dotIdx := strings.Index(keyWithoutDataType, "."); dotIdx != -1 {
		potentialContext := keyWithoutDataType[:dotIdx]
		if fc, ok := fieldContexts[potentialContext]; ok && fc != FieldContextUnspecified {
			fieldName = keyWithoutDataType[dotIdx+1:]

			// Step 2a: Handle special case for log.body.* fields
			if fc == FieldContextLog && strings.HasPrefix(fieldName, BodyJSONStringSearchPrefix) {
				fc = FieldContextBody
				fieldName = strings.TrimPrefix(fieldName, BodyJSONStringSearchPrefix)
			}

			return TelemetryFieldKey{
				Name:          fieldName,
				FieldContext:  fc,
				FieldDataType: explicitFieldDataType,
			}
		}
	}

	return fieldKeySelector
	// Step 3: No context found, entire key is the field name
	return TelemetryFieldKey{
		Name:          keyWithoutDataType,
		FieldContext:  FieldContextUnspecified,
		FieldDataType: explicitFieldDataType,
	}
}

func TelemetryFieldKeyToText(key *TelemetryFieldKey) string {
	var sb strings.Builder
	if key.FieldContext != FieldContextUnspecified {
		sb.WriteString(key.FieldContext.StringValue())
		sb.WriteString(".")
	}
	sb.WriteString(key.Name)
	if key.FieldDataType != FieldDataTypeUnspecified {
		sb.WriteString(":")
		sb.WriteString(key.FieldDataType.StringValue())
	}
	return sb.String()
}

func FieldKeyToMaterializedColumnName(key *TelemetryFieldKey) string {

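Putting the two paths together, here is how a key text round-trips through the helpers above (values follow the doc comments and the tests below; illustrative only):

key := GetFieldKeyFromKeyText("resource.service.name:string")
// key.Name == "service.name"
// key.FieldContext == FieldContextResource
// key.FieldDataType == FieldDataTypeString
text := key.Text() // "resource.service.name:string" — Text re-attaches both parts
_ = text
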
@@ -83,12 +83,314 @@ func TestGetFieldKeyFromKeyText(t *testing.T) {
			FieldDataType: FieldDataTypeUnspecified,
		},
	},
	// Test case for log.body.status - the body. prefix under log context maps to body context with status as field name
	{
		keyText: "log.body.status",
		expected: TelemetryFieldKey{
			Name:          "status",
			FieldContext:  FieldContextBody,
			FieldDataType: FieldDataTypeUnspecified,
		},
	},
	// Test case for log.body. with data type
	{
		keyText: "log.body.status_code:int",
		expected: TelemetryFieldKey{
			Name:          "status_code",
			FieldContext:  FieldContextBody,
			FieldDataType: FieldDataTypeNumber,
		},
	},
	// Test case for log.body. with nested field name
	{
		keyText: "log.body.http.status.code",
		expected: TelemetryFieldKey{
			Name:          "http.status.code",
			FieldContext:  FieldContextBody,
			FieldDataType: FieldDataTypeUnspecified,
		},
	},
	// Test case for body. prefix - should use body as context
	{
		keyText: "body.http.status.code:int",
		expected: TelemetryFieldKey{
			Name:          "http.status.code",
			FieldContext:  FieldContextBody,
			FieldDataType: FieldDataTypeNumber,
		},
	},
	// Test case for body. prefix without data type - should use body as context
	{
		keyText: "body.status",
		expected: TelemetryFieldKey{
			Name:          "status",
			FieldContext:  FieldContextBody,
			FieldDataType: FieldDataTypeUnspecified,
		},
	},
	// Test case for body. prefix with array data type - should use body as context
	{
		keyText: "body.tags:[]string",
		expected: TelemetryFieldKey{
			Name:          "tags",
			FieldContext:  FieldContextBody,
			FieldDataType: FieldDataTypeArrayString,
		},
	},
	// Test case for body. prefix with array data type (int64)
	{
		keyText: "body.ids:[]int64",
		expected: TelemetryFieldKey{
			Name:          "ids",
			FieldContext:  FieldContextBody,
			FieldDataType: FieldDataTypeArrayInt64,
		},
	},
	// Test case for body. prefix with nested field and array data type
	{
		keyText: "body.http.headers:[]string",
		expected: TelemetryFieldKey{
			Name:          "http.headers",
			FieldContext:  FieldContextBody,
			FieldDataType: FieldDataTypeArrayString,
		},
	},
	// Test case for just "body" - should use unspecified context with body as field name
	{
		keyText: "body",
		expected: TelemetryFieldKey{
			Name:          "body",
			FieldContext:  FieldContextUnspecified,
			FieldDataType: FieldDataTypeUnspecified,
		},
	},
	// Test case for just "body" with data type
	{
		keyText: "body:string",
		expected: TelemetryFieldKey{
			Name:          "body",
			FieldContext:  FieldContextUnspecified,
			FieldDataType: FieldDataTypeString,
		},
	},
	// Test case for log.body (without trailing dot) - should keep log context
	{
		keyText: "log.body",
		expected: TelemetryFieldKey{
			Name:          "body",
			FieldContext:  FieldContextLog,
			FieldDataType: FieldDataTypeUnspecified,
		},
	},
	// Test case for log.body with data type - should keep log context
	{
		keyText: "log.body:string",
		expected: TelemetryFieldKey{
			Name:          "body",
			FieldContext:  FieldContextLog,
			FieldDataType: FieldDataTypeString,
		},
	},
	// Test case for field name with dots and no context
	{
		keyText: "http.status.code",
		expected: TelemetryFieldKey{
			Name:          "http.status.code",
			FieldContext:  FieldContextUnspecified,
			FieldDataType: FieldDataTypeUnspecified,
		},
	},
	// Test case for field name with dots and data type but no context
	{
		keyText: "http.status.code:int",
		expected: TelemetryFieldKey{
			Name:          "http.status.code",
			FieldContext:  FieldContextUnspecified,
			FieldDataType: FieldDataTypeNumber,
		},
	},
	// Test case for just field name
	{
		keyText: "fieldName",
		expected: TelemetryFieldKey{
			Name:          "fieldName",
			FieldContext:  FieldContextUnspecified,
			FieldDataType: FieldDataTypeUnspecified,
		},
	},
	// Test case for just field name with data type
	{
		keyText: "fieldName:string",
		expected: TelemetryFieldKey{
			Name:          "fieldName",
			FieldContext:  FieldContextUnspecified,
			FieldDataType: FieldDataTypeString,
		},
	},
	// Test case for field starting with dot
	{
		keyText: ".http_code",
		expected: TelemetryFieldKey{
			Name:          ".http_code",
			FieldContext:  FieldContextUnspecified,
			FieldDataType: FieldDataTypeUnspecified,
		},
	},
	// Test case for field starting with dot and with data type
	{
		keyText: ".http_code:int",
		expected: TelemetryFieldKey{
			Name:          ".http_code",
			FieldContext:  FieldContextUnspecified,
			FieldDataType: FieldDataTypeNumber,
		},
	},
	// Test case for field starting with dot and nested field name
	{
		keyText: ".http.status.code:int",
		expected: TelemetryFieldKey{
			Name:          ".http.status.code",
			FieldContext:  FieldContextUnspecified,
			FieldDataType: FieldDataTypeNumber,
		},
	},
	}

	for _, testCase := range testCases {
		result := GetFieldKeyFromKeyText(testCase.keyText)
		if !reflect.DeepEqual(result, testCase.expected) {
			t.Errorf("expected %v, got %v", testCase.expected, result)
			t.Errorf("For key '%s': expected %v, got %v", testCase.keyText, testCase.expected, result)
		}
	}
}

func TestNormalize(t *testing.T) {
	testCases := []struct {
		name     string
		input    TelemetryFieldKey
		expected TelemetryFieldKey
	}{
		{
			name: "Normalize key with context and data type in name",
			input: TelemetryFieldKey{
				Name: "resource.service.name:string",
			},
			expected: TelemetryFieldKey{
				Name:          "service.name",
				FieldContext:  FieldContextResource,
				FieldDataType: FieldDataTypeString,
			},
		},
		{
			name: "Normalize key with existing context and data type",
			input: TelemetryFieldKey{
				Name:          "service.name",
				FieldContext:  FieldContextResource,
				FieldDataType: FieldDataTypeString,
			},
			expected: TelemetryFieldKey{
				Name:          "service.name",
				FieldContext:  FieldContextResource,
				FieldDataType: FieldDataTypeString,
			},
		},
		{
			name: "Normalize body field",
			input: TelemetryFieldKey{
				Name: "body.status:int",
			},
			expected: TelemetryFieldKey{
				Name:          "status",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		{
			name: "Normalize log.body.* field",
			input: TelemetryFieldKey{
				Name: "log.body.status",
			},
			expected: TelemetryFieldKey{
				Name:          "status",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		{
			name: "Normalize field with no context",
			input: TelemetryFieldKey{
				Name: "http.status.code:int",
			},
			expected: TelemetryFieldKey{
				Name:          "http.status.code",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		{
			name: "Normalize exact body keyword",
			input: TelemetryFieldKey{
				Name: "body",
			},
			expected: TelemetryFieldKey{
				Name:          "body",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		{
			name: "Normalize span field",
			input: TelemetryFieldKey{
				Name: "span.kind:string",
			},
			expected: TelemetryFieldKey{
				Name:          "kind",
				FieldContext:  FieldContextSpan,
				FieldDataType: FieldDataTypeString,
			},
		},
		{
			name: "Normalize attribute field",
			input: TelemetryFieldKey{
				Name: "attribute.http.method",
			},
			expected: TelemetryFieldKey{
				Name:          "http.method",
				FieldContext:  FieldContextAttribute,
				FieldDataType: FieldDataTypeUnspecified,
			},
		},
		{
			name: "Normalize field starting with dot",
			input: TelemetryFieldKey{
				Name: ".http_code:int",
			},
			expected: TelemetryFieldKey{
				Name:          ".http_code",
				FieldContext:  FieldContextUnspecified,
				FieldDataType: FieldDataTypeNumber,
			},
		},
		{
			name: "Normalize array data type field",
			input: TelemetryFieldKey{
				Name: "body.tags:[]string",
			},
			expected: TelemetryFieldKey{
				Name:          "tags",
				FieldContext:  FieldContextBody,
				FieldDataType: FieldDataTypeArrayString,
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			key := tc.input
			key.Normalize()
			if !reflect.DeepEqual(key, tc.expected) {
				t.Errorf("Expected %v, got %v", tc.expected, key)
			}
		})
	}
}

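The second case above doubles as an idempotence check: a key whose context and data type are already populated passes through Normalize unchanged, which is what makes it safe for UnmarshalJSON implementations to call Normalize unconditionally after decoding.
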
@@ -377,7 +377,7 @@ def idp_login(driver: webdriver.Chrome) -> Callable[[str, str], None]:
    # Fill the email in username field
    username_field = wait.until(EC.element_to_be_clickable((By.ID, "username")))
    username_field.send_keys(email)


    # Fill the password in password field
    password_field = wait.until(EC.element_to_be_clickable((By.ID, "password")))
    password_field.send_keys(password)

@@ -66,7 +66,7 @@ def signoz(  # pylint: disable=too-many-arguments,too-many-positional-arguments
        "SIGNOZ_PROMETHEUS_ACTIVE__QUERY__TRACKER_ENABLED": False,
        "SIGNOZ_GATEWAY_URL": gateway.container_configs["8080"].base(),
        "SIGNOZ_TOKENIZER_JWT_SECRET": "secret",
        "SIGNOZ_GLOBAL_INGESTION__URL": "https://ingest.test.signoz.cloud"
        "SIGNOZ_GLOBAL_INGESTION__URL": "https://ingest.test.signoz.cloud",
    }
    | sqlstore.env
    | clickhouse.env

@@ -2,6 +2,7 @@ from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, List

import pytest
import requests

from fixtures import types
@@ -11,7 +12,9 @@ from fixtures.querier import (
    assert_minutely_bucket_values,
    find_named_result,
    index_series_by_label,
    make_query_request,
)
from src.querier.util import assert_identical_query_response


def test_logs_list(
@@ -396,6 +399,122 @@ def test_logs_list(
    assert "d-001" in values


@pytest.mark.parametrize(
    "order_by_context,expected_order",
    ####
    # Tests:
    # 1. Query logs ordered by attribute.service.name descending
    # 2. Query logs ordered by resource.service.name descending
    # 3. Query logs ordered by service.name descending
    ###
    [
        pytest.param("attribute", ["log-002", "log-001", "log-004", "log-003"]),
        pytest.param("resource", ["log-003", "log-004", "log-001", "log-002"]),
        pytest.param("", ["log-002", "log-001", "log-003", "log-004"]),
    ],
)
def test_logs_list_with_order_by(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_logs: Callable[[List[Logs]], None],
    order_by_context: str,
    expected_order: List[str],
) -> None:
    """
    Setup:
        Insert 4 logs with service.name in attributes and resources
    """

    attribute_resource_pair = [
        [{"id": "log-001", "service.name": "c"}, {}],
        [{"id": "log-002", "service.name": "d"}, {}],
        [{"id": "log-003"}, {"service.name": "b"}],
        [{"id": "log-004"}, {"service.name": "a"}],
    ]
    insert_logs(
        [
            Logs(
                timestamp=datetime.now(tz=timezone.utc) - timedelta(seconds=3),
                attributes=attribute_resource_pair[i][0],
                resources=attribute_resource_pair[i][1],
                body="Log with DEBUG severity",
                severity_text="DEBUG",
            )
            for i in range(4)
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    query = {
        "type": "builder_query",
        "spec": {
            "name": "A",
            "signal": "logs",
            "order": [
                {
                    "key": {
                        "name": "service.name",
                        "fieldContext": order_by_context,
                    },
                    "direction": "desc",
                }
            ],
        },
    }

    query_with_inline_context = {
        "type": "builder_query",
        "spec": {
            "name": "A",
            "signal": "logs",
            "order": [
                {
                    "key": {
                        "name": f"{order_by_context + '.' if order_by_context else ''}service.name",
                    },
                    "direction": "desc",
                }
            ],
        },
    }

    response = make_query_request(
        signoz,
        token,
        start_ms=int(
            (datetime.now(tz=timezone.utc) - timedelta(minutes=1)).timestamp() * 1000
        ),
        end_ms=int(datetime.now(tz=timezone.utc).timestamp() * 1000),
        request_type="raw",
        queries=[query],
    )

    # Verify that both queries return the same results when the context is specified inline in the key name
    response_with_inline_context = make_query_request(
        signoz,
        token,
        start_ms=int(
            (datetime.now(tz=timezone.utc) - timedelta(minutes=1)).timestamp() * 1000
        ),
        end_ms=int(datetime.now(tz=timezone.utc).timestamp() * 1000),
        request_type="raw",
        queries=[query_with_inline_context],
    )

    assert_identical_query_response(response, response_with_inline_context)

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"

    results = response.json()["data"]["data"]["results"]
    rows = results[0]["rows"]
    ids = [row["data"]["attributes_string"].get("id", "") for row in rows]

    assert ids == expected_order


def test_logs_time_series_count(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
@@ -825,6 +944,44 @@ def test_logs_time_series_count(
        },
    )

    response_with_inline_context = make_query_request(
        signoz,
        token,
        start_ms=int(
            (
                datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)
                - timedelta(minutes=5)
            ).timestamp()
            * 1000
        ),
        end_ms=int(
            datetime.now(tz=timezone.utc).replace(second=0, microsecond=0).timestamp()
            * 1000
        ),
        request_type="time_series",
        queries=[
            {
                "type": "builder_query",
                "spec": {
                    "name": "A",
                    "signal": "logs",
                    "stepInterval": 60,
                    "disabled": False,
                    "groupBy": [
                        {
                            "name": "resource.host.name:string",
                        }
                    ],
                    "order": [{"key": {"name": "host.name"}, "direction": "desc"}],
                    "having": {"expression": ""},
                    "aggregations": [{"expression": "count()"}],
                },
            }
        ],
    )

    assert_identical_query_response(response, response_with_inline_context)

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"

@@ -2,6 +2,7 @@ from datetime import datetime, timedelta, timezone
from http import HTTPStatus
from typing import Callable, Dict, List

import pytest
import requests

from fixtures import types
@@ -10,8 +11,10 @@ from fixtures.querier import (
    assert_minutely_bucket_values,
    find_named_result,
    index_series_by_label,
    make_query_request,
)
from fixtures.traces import TraceIdGenerator, Traces, TracesKind, TracesStatusCode
from src.querier.util import assert_identical_query_response


def test_traces_list(
@@ -215,6 +218,59 @@ def test_traces_list(
        },
    )

    # Query results with the context prefixed to the key names
    response_with_inline_context = make_query_request(
        signoz,
        token,
        start_ms=int(
            (datetime.now(tz=timezone.utc) - timedelta(minutes=5)).timestamp() * 1000
        ),
        end_ms=int(datetime.now(tz=timezone.utc).timestamp() * 1000),
        request_type="raw",
        queries=[
            {
                "type": "builder_query",
                "spec": {
                    "name": "A",
                    "signal": "traces",
                    "disabled": False,
                    "limit": 10,
                    "offset": 0,
                    "order": [
                        {"key": {"name": "timestamp"}, "direction": "desc"},
                    ],
                    "selectFields": [
                        {
                            "name": "resource.service.name",
                            "fieldDataType": "string",
                            "signal": "traces",
                        },
                        {
                            "name": "span.name:string",
                            "signal": "traces",
                        },
                        {
                            "name": "span.duration_nano",
                            "signal": "traces",
                        },
                        {
                            "name": "span.http_method",
                            "signal": "traces",
                        },
                        {
                            "name": "span.response_status_code",
                            "signal": "traces",
                        },
                    ],
                    "having": {"expression": ""},
                    "aggregations": [{"expression": "count()"}],
                },
            }
        ],
    )
    assert_identical_query_response(response, response_with_inline_context)

    assert response.status_code == HTTPStatus.OK
    assert response.json()["status"] == "success"

@@ -420,6 +476,236 @@ def test_traces_list(
    assert set(values) == set(["topic-service", "http-service"])


@pytest.mark.parametrize(
    "order_by,aggregation_alias,expected_status",
    [
        # Case 1a: count by count()
        pytest.param({"name": "count()"}, "count_", HTTPStatus.OK),
        # Case 1b: count by count() with alias span.count_
        pytest.param({"name": "count()"}, "span.count_", HTTPStatus.OK),
        # Case 2a: count by count() with context specified in the key
        pytest.param(
            {"name": "count()", "fieldContext": "span"}, "count_", HTTPStatus.OK
        ),
        # Case 2b: count by count() with context specified in the key with alias span.count_
        pytest.param(
            {"name": "count()", "fieldContext": "span"}, "span.count_", HTTPStatus.OK
        ),
        # Case 3a: count by span.count() and context specified in the key [BAD REQUEST]
        pytest.param(
            {"name": "span.count()", "fieldContext": "span"},
            "count_",
            HTTPStatus.BAD_REQUEST,
        ),
        # Case 3b: count by span.count() and context specified in the key with alias span.count_ [BAD REQUEST]
        pytest.param(
            {"name": "span.count()", "fieldContext": "span"},
            "span.count_",
            HTTPStatus.BAD_REQUEST,
        ),
        # Case 4a: count by span.count() with an empty context specified in the key
        pytest.param(
            {"name": "span.count()", "fieldContext": ""}, "count_", HTTPStatus.OK
        ),
        # Case 4b: count by span.count() with an empty context specified in the key with alias span.count_
        pytest.param(
            {"name": "span.count()", "fieldContext": ""}, "span.count_", HTTPStatus.OK
        ),
        # Case 5a: count by count_
        pytest.param({"name": "count_"}, "count_", HTTPStatus.OK),
        # Case 5b: count by count_ with alias span.count_
        pytest.param({"name": "count_"}, "count_", HTTPStatus.OK),
        # Case 6a: count by span.count_
        pytest.param({"name": "span.count_"}, "count_", HTTPStatus.OK),
        # Case 6b: count by span.count_ with alias span.count_
        pytest.param(
            {"name": "span.count_"}, "span.count_", HTTPStatus.BAD_REQUEST
        ),  # THIS SHOULD BE OK BUT FAILS DUE TO LIMITATION IN CURRENT IMPLEMENTATION
        # Case 7a: count by span.count_ and context specified in the key [BAD REQUEST]
        pytest.param(
            {"name": "span.count_", "fieldContext": "span"},
            "count_",
            HTTPStatus.BAD_REQUEST,
        ),
        # Case 7b: count by span.count_ and context specified in the key with alias span.count_
        pytest.param(
            {"name": "span.count_", "fieldContext": "span"},
            "span.count_",
            HTTPStatus.OK,
        ),
    ],
)
def test_traces_aggergate_order_by_count(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument
    get_token: Callable[[str, str], str],
    insert_traces: Callable[[List[Traces]], None],
    order_by: Dict[str, str],
    aggregation_alias: str,
    expected_status: HTTPStatus,
) -> None:
    """
    Setup:
        Insert 4 spans across two traces with different attributes.
        http-service: POST /integration -> SELECT, HTTP PATCH
        topic-service: topic publish

    Tests:
        1. Query the span count ordered by count(), with the order-by key and
           the aggregation alias spelled in the different ways parametrized above
    """
    http_service_trace_id = TraceIdGenerator.trace_id()
    http_service_span_id = TraceIdGenerator.span_id()
    http_service_db_span_id = TraceIdGenerator.span_id()
    http_service_patch_span_id = TraceIdGenerator.span_id()
    topic_service_trace_id = TraceIdGenerator.trace_id()
    topic_service_span_id = TraceIdGenerator.span_id()

    now = datetime.now(tz=timezone.utc).replace(second=0, microsecond=0)

    insert_traces(
        [
            Traces(
                timestamp=now - timedelta(seconds=4),
                duration=timedelta(seconds=3),
                trace_id=http_service_trace_id,
                span_id=http_service_span_id,
                parent_span_id="",
                name="POST /integration",
                kind=TracesKind.SPAN_KIND_SERVER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "http-service",
                    "os.type": "linux",
                    "host.name": "linux-000",
                    "cloud.provider": "integration",
                    "cloud.account.id": "000",
                },
                attributes={
                    "net.transport": "IP.TCP",
                    "http.scheme": "http",
                    "http.user_agent": "Integration Test",
                    "http.request.method": "POST",
                    "http.response.status_code": "200",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=3.5),
                duration=timedelta(seconds=0.5),
                trace_id=http_service_trace_id,
                span_id=http_service_db_span_id,
                parent_span_id=http_service_span_id,
                name="SELECT",
                kind=TracesKind.SPAN_KIND_CLIENT,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "http-service",
                    "os.type": "linux",
                    "host.name": "linux-000",
                    "cloud.provider": "integration",
                    "cloud.account.id": "000",
                },
                attributes={
                    "db.name": "integration",
                    "db.operation": "SELECT",
                    "db.statement": "SELECT * FROM integration",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=3),
                duration=timedelta(seconds=1),
                trace_id=http_service_trace_id,
                span_id=http_service_patch_span_id,
                parent_span_id=http_service_span_id,
                name="HTTP PATCH",
                kind=TracesKind.SPAN_KIND_CLIENT,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "http-service",
                    "os.type": "linux",
                    "host.name": "linux-000",
                    "cloud.provider": "integration",
                    "cloud.account.id": "000",
                },
                attributes={
                    "http.request.method": "PATCH",
                    "http.status_code": "404",
                },
            ),
            Traces(
                timestamp=now - timedelta(seconds=1),
                duration=timedelta(seconds=4),
                trace_id=topic_service_trace_id,
                span_id=topic_service_span_id,
                parent_span_id="",
                name="topic publish",
                kind=TracesKind.SPAN_KIND_PRODUCER,
                status_code=TracesStatusCode.STATUS_CODE_OK,
                status_message="",
                resources={
                    "deployment.environment": "production",
                    "service.name": "topic-service",
                    "os.type": "linux",
                    "host.name": "linux-001",
                    "cloud.provider": "integration",
                    "cloud.account.id": "001",
                },
                attributes={
                    "message.type": "SENT",
                    "messaging.operation": "publish",
                    "messaging.message.id": "001",
                },
            ),
        ]
    )

    token = get_token(USER_ADMIN_EMAIL, USER_ADMIN_PASSWORD)

    query = {
        "type": "builder_query",
        "spec": {
            "name": "A",
            "signal": "traces",
            "disabled": False,
            "order": [{"key": {"name": "count()"}, "direction": "desc"}],
            "aggregations": [{"expression": "count()", "alias": "count_"}],
        },
    }

    # Query traces count for spans

    query["spec"]["order"][0]["key"] = order_by
    query["spec"]["aggregations"][0]["alias"] = aggregation_alias
    response = make_query_request(
        signoz,
        token,
        start_ms=int(
            (datetime.now(tz=timezone.utc) - timedelta(minutes=5)).timestamp() * 1000
        ),
        end_ms=int(datetime.now(tz=timezone.utc).timestamp() * 1000),
        request_type="time_series",
        queries=[query],
    )

    assert response.status_code == expected_status
    if expected_status != HTTPStatus.OK:
        return
    assert response.json()["status"] == "success"

    results = response.json()["data"]["data"]["results"]
    assert len(results) == 1
    aggregations = results[0]["aggregations"]
    assert len(aggregations) == 1
    series = aggregations[0]["series"]
    assert len(series) == 1
    assert series[0]["values"][0]["value"] == 4


def test_traces_fill_gaps(
    signoz: types.SigNoz,
    create_user_admin: None,  # pylint: disable=unused-argument

20
tests/integration/src/querier/util.py
Normal file
@@ -0,0 +1,20 @@
from http import HTTPStatus

import requests


def assert_identical_query_response(
    response1: requests.Response, response2: requests.Response
) -> None:
    """
    Assert that two query responses are identical in status and data.
    """
    assert response1.status_code == response2.status_code, "Status codes do not match"
    if response1.status_code == HTTPStatus.OK:
        assert (
            response1.json()["status"] == response2.json()["status"]
        ), "Response statuses do not match"
        assert (
            response1.json()["data"]["data"]["results"]
            == response2.json()["data"]["data"]["results"]
        ), "Response data do not match"