Compare commits


1 Commit

Author: ahmadshaheer
SHA1: 661a05f4a4
Message: chore: button component demo
Date: 2025-04-28 20:20:23 +04:30
45 changed files with 2679 additions and 192 deletions

View File

@@ -66,6 +66,8 @@ func NewAPIHandler(opts APIHandlerOptions, signoz *signoz.SigNoz) (*APIHandler,
LogsParsingPipelineController: opts.LogsParsingPipelineController,
Cache: opts.Cache,
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
AlertmanagerAPI: alertmanager.NewAPI(signoz.Alertmanager),
FieldsAPI: fields.NewAPI(signoz.TelemetryStore),
Signoz: signoz,

View File

@@ -23,10 +23,12 @@ func NewDataConnector(
telemetryStore telemetrystore.TelemetryStore,
prometheus prometheus.Prometheus,
cluster string,
useLogsNewSchema bool,
useTraceNewSchema bool,
fluxIntervalForTraceDetail time.Duration,
cache cache.Cache,
) *ClickhouseReader {
chReader := basechr.NewReader(sqlDB, telemetryStore, prometheus, cluster, fluxIntervalForTraceDetail, cache)
chReader := basechr.NewReader(sqlDB, telemetryStore, prometheus, cluster, useLogsNewSchema, useTraceNewSchema, fluxIntervalForTraceDetail, cache)
return &ClickhouseReader{
conn: telemetryStore.ClickhouseDB(),
appdb: sqlDB,

View File

@@ -62,6 +62,8 @@ type ServerOptions struct {
FluxIntervalForTraceDetail string
Cluster string
GatewayUrl string
UseLogsNewSchema bool
UseTraceNewSchema bool
Jwt *authtypes.JWT
}
@@ -130,6 +132,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.SigNoz.TelemetryStore,
serverOptions.SigNoz.Prometheus,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
fluxIntervalForTraceDetail,
serverOptions.SigNoz.Cache,
)
@@ -147,6 +151,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.SigNoz.SQLStore.SQLxDB(),
reader,
c,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
serverOptions.SigNoz.Alertmanager,
serverOptions.SigNoz.SQLStore,
serverOptions.SigNoz.TelemetryStore,
@@ -227,6 +233,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
FluxInterval: fluxInterval,
Gateway: gatewayProxy,
GatewayUrl: serverOptions.GatewayUrl,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseTraceNewSchema: serverOptions.UseTraceNewSchema,
JWT: serverOptions.Jwt,
}
@@ -236,6 +244,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
}
s := &Server{
// logger: logger,
// tracer: tracer,
ruleManager: rm,
serverOptions: serverOptions,
unavailableChannel: make(chan healthcheck.Status),
@@ -476,6 +486,8 @@ func makeRulesManager(
db *sqlx.DB,
ch baseint.Reader,
cache cache.Cache,
useLogsNewSchema bool,
useTraceNewSchema bool,
alertmanager alertmanager.Alertmanager,
sqlstore sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
@@ -492,6 +504,8 @@ func makeRulesManager(
Cache: cache,
EvalDelay: baseconst.GetEvalDelay(),
PrepareTaskFunc: rules.PrepareTaskFunc,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
PrepareTestRuleFunc: rules.TestNotification,
Alertmanager: alertmanager,
SQLStore: sqlstore,

View File

@@ -21,7 +21,6 @@ import (
"go.uber.org/zap/zapcore"
)
// Deprecated: Please use the logger from pkg/instrumentation.
func initZapLog() *zap.Logger {
config := zap.NewProductionConfig()
config.EncoderConfig.TimeKey = "timestamp"
@@ -51,9 +50,7 @@ func main() {
var gatewayUrl string
var useLicensesV3 bool
// Deprecated
flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
// Deprecated
flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
// Deprecated
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
@@ -139,6 +136,8 @@ func main() {
FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
Cluster: cluster,
GatewayUrl: gatewayUrl,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
Jwt: jwt,
}

View File

@@ -25,6 +25,8 @@ func PrepareTaskFunc(opts baserules.PrepareTaskOptions) (baserules.Task, error)
ruleId,
opts.Rule,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithEvalDelay(opts.ManagerOpts.EvalDelay),
baserules.WithSQLStore(opts.SQLStore),
)
@@ -121,6 +123,8 @@ func TestNotification(opts baserules.PrepareTestRuleOptions) (int, *basemodel.Ap
alertname,
parsedRule,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
baserules.WithSendAlways(),
baserules.WithSendUnmatched(),
baserules.WithSQLStore(opts.SQLStore),

View File

@@ -43,6 +43,7 @@
"@radix-ui/react-tooltip": "1.0.7",
"@sentry/react": "8.41.0",
"@sentry/webpack-plugin": "2.22.6",
"@signozhq/button": "0.0.0",
"@signozhq/design-tokens": "1.1.4",
"@tanstack/react-table": "8.20.6",
"@tanstack/react-virtual": "3.11.2",

View File

@@ -64,6 +64,10 @@ export const TraceDetail = Loadable(
),
);
export const UsageExplorerPage = Loadable(
() => import(/* webpackChunkName: "UsageExplorerPage" */ 'modules/Usage'),
);
export const SignupPage = Loadable(
() => import(/* webpackChunkName: "SignupPage" */ 'pages/SignUp'),
);

View File

@@ -57,6 +57,7 @@ import {
TracesFunnels,
TracesSaveViews,
UnAuthorized,
UsageExplorerPage,
WorkspaceAccessRestricted,
WorkspaceBlocked,
WorkspaceSuspended,
@@ -154,6 +155,13 @@ const routes: AppRoutes[] = [
isPrivate: true,
key: 'SETTINGS',
},
{
path: ROUTES.USAGE_EXPLORER,
exact: true,
component: UsageExplorerPage,
isPrivate: true,
key: 'USAGE_EXPLORER',
},
{
path: ROUTES.ALL_DASHBOARD,
exact: true,

View File

@@ -0,0 +1,26 @@
import axios from 'api';
import { ErrorResponseHandler } from 'api/ErrorResponseHandler';
import { AxiosError } from 'axios';
import { ErrorResponse, SuccessResponse } from 'types/api';
import { PayloadProps, Props } from 'types/api/logs/getLogs';
const GetLogs = async (
props: Props,
): Promise<SuccessResponse<PayloadProps> | ErrorResponse> => {
try {
const data = await axios.get(`/logs`, {
params: props,
});
return {
statusCode: 200,
error: null,
message: '',
payload: data.data.results,
};
} catch (error) {
return ErrorResponseHandler(error as AxiosError);
}
};
export default GetLogs;

View File

@@ -0,0 +1,19 @@
import apiV1 from 'api/apiV1';
import getLocalStorageKey from 'api/browser/localstorage/get';
import { ENVIRONMENT } from 'constants/env';
import { LOCALSTORAGE } from 'constants/localStorage';
import { EventSourcePolyfill } from 'event-source-polyfill';
// 10 min in ms
const TIMEOUT_IN_MS = 10 * 60 * 1000;
export const LiveTail = (queryParams: string): EventSourcePolyfill =>
new EventSourcePolyfill(
`${ENVIRONMENT.baseURL}${apiV1}logs/tail?${queryParams}`,
{
headers: {
Authorization: `Bearer ${getLocalStorageKey(LOCALSTORAGE.AUTH_TOKEN)}`,
},
heartbeatTimeout: TIMEOUT_IN_MS,
},
);

View File

@@ -17,6 +17,7 @@ const ROUTES = {
'/get-started/infrastructure-monitoring',
GET_STARTED_AWS_MONITORING: '/get-started/aws-monitoring',
GET_STARTED_AZURE_MONITORING: '/get-started/azure-monitoring',
USAGE_EXPLORER: '/usage-explorer',
APPLICATION: '/services',
ALL_DASHBOARD: '/dashboard',
DASHBOARD: '/dashboard/:dashboardId',

View File

@@ -1,6 +1,7 @@
/* eslint-disable sonarjs/no-duplicate-string */
import './Home.styles.scss';
import { Button as SignozButton } from '@signozhq/button';
import { Color } from '@signozhq/design-tokens';
import { Alert, Button, Popover } from 'antd';
import logEvent from 'api/common/logEvent';
@@ -305,6 +306,9 @@ export default function Home(): JSX.Element {
}
rightComponent={
<div className="home-header-right">
<SignozButton variant="solid" size="lg" color="primary">
Button
</SignozButton>
{isWelcomeChecklistSkipped && (
<Popover
placement="bottomRight"

View File

@@ -133,3 +133,231 @@ const ServicesListTable = memo(
),
);
ServicesListTable.displayName = 'ServicesListTable';
function ServiceMetrics({
onUpdateChecklistDoneItem,
loadingUserPreferences,
}: {
onUpdateChecklistDoneItem: (itemKey: string) => void;
loadingUserPreferences: boolean;
}): JSX.Element {
const { selectedTime: globalSelectedInterval } = useSelector<
AppState,
GlobalReducer
>((state) => state.globalTime);
const { user, activeLicenseV3 } = useAppContext();
const [timeRange, setTimeRange] = useState(() => {
const now = new Date().getTime();
return {
startTime: now - homeInterval,
endTime: now,
selectedInterval: homeInterval,
};
});
const { queries } = useResourceAttribute();
const { safeNavigate } = useSafeNavigate();
const selectedTags = useMemo(
() => (convertRawQueriesToTraceSelectedTags(queries) as Tags[]) || [],
[queries],
);
const [isError, setIsError] = useState(false);
const queryKey: QueryKey = useMemo(
() => [
timeRange.startTime,
timeRange.endTime,
selectedTags,
globalSelectedInterval,
],
[
timeRange.startTime,
timeRange.endTime,
selectedTags,
globalSelectedInterval,
],
);
const {
data,
isLoading: isLoadingTopLevelOperations,
isError: isErrorTopLevelOperations,
} = useGetTopLevelOperations(queryKey, {
start: timeRange.startTime * 1e6,
end: timeRange.endTime * 1e6,
});
const handleTimeIntervalChange = useCallback((value: number): void => {
const timeInterval = TIME_PICKER_OPTIONS.find(
(option) => option.value === value,
);
logEvent('Homepage: Services time interval updated', {
updatedTimeInterval: timeInterval?.label,
});
const now = new Date();
setTimeRange({
startTime: now.getTime() - value,
endTime: now.getTime(),
selectedInterval: value,
});
}, []);
const topLevelOperations = useMemo(() => Object.entries(data || {}), [data]);
const queryRangeRequestData = useMemo(
() =>
getQueryRangeRequestData({
topLevelOperations,
minTime: timeRange.startTime * 1e6,
maxTime: timeRange.endTime * 1e6,
globalSelectedInterval,
}),
[
globalSelectedInterval,
timeRange.endTime,
timeRange.startTime,
topLevelOperations,
],
);
const dataQueries = useGetQueriesRange(
queryRangeRequestData,
ENTITY_VERSION_V4,
{
queryKey: useMemo(
() => [
`GetMetricsQueryRange-home-${globalSelectedInterval}`,
timeRange.endTime,
timeRange.startTime,
globalSelectedInterval,
],
[globalSelectedInterval, timeRange.endTime, timeRange.startTime],
),
keepPreviousData: true,
enabled: true,
refetchOnMount: false,
onError: () => {
setIsError(true);
},
},
);
const isLoading = useMemo(() => dataQueries.some((query) => query.isLoading), [
dataQueries,
]);
const services: ServicesList[] = useMemo(
() =>
getServiceListFromQuery({
queries: dataQueries,
topLevelOperations,
isLoading,
}),
[dataQueries, topLevelOperations, isLoading],
);
const sortedServices = useMemo(
() =>
services?.sort((a, b) => {
const aUpdateAt = new Date(a.p99).getTime();
const bUpdateAt = new Date(b.p99).getTime();
return bUpdateAt - aUpdateAt;
}) || [],
[services],
);
const servicesExist = sortedServices.length > 0;
const top5Services = useMemo(() => sortedServices.slice(0, 5), [
sortedServices,
]);
useEffect(() => {
if (!loadingUserPreferences && servicesExist) {
onUpdateChecklistDoneItem('SETUP_SERVICES');
}
}, [onUpdateChecklistDoneItem, loadingUserPreferences, servicesExist]);
const handleRowClick = useCallback(
(record: ServicesList) => {
logEvent('Homepage: Service clicked', {
serviceName: record.serviceName,
});
safeNavigate(`${ROUTES.APPLICATION}/${record.serviceName}`);
},
[safeNavigate],
);
if (isLoadingTopLevelOperations || isLoading) {
return (
<Card className="services-list-card home-data-card loading-card">
<Card.Content>
<Skeleton active />
</Card.Content>
</Card>
);
}
if (isErrorTopLevelOperations || isError) {
return (
<Card className="services-list-card home-data-card error-card">
<Card.Content>
<Skeleton active />
</Card.Content>
</Card>
);
}
return (
<Card className="services-list-card home-data-card">
{servicesExist && (
<Card.Header>
<div className="services-header home-data-card-header">
{' '}
Services
<div className="services-header-actions">
<Select
value={timeRange.selectedInterval}
onChange={handleTimeIntervalChange}
options={TIME_PICKER_OPTIONS}
className="services-header-select"
/>
</div>
</div>
</Card.Header>
)}
<Card.Content>
{servicesExist ? (
<ServicesListTable services={top5Services} onRowClick={handleRowClick} />
) : (
<EmptyState user={user} activeLicenseV3={activeLicenseV3} />
)}
</Card.Content>
{servicesExist && (
<Card.Footer>
<div className="services-footer home-data-card-footer">
<Link to="/services">
<Button
type="link"
className="periscope-btn link learn-more-link"
onClick={(): void => {
logEvent('Homepage: All Services clicked', {});
}}
>
All Services <ArrowRight size={12} />
</Button>
</Link>
</div>
</Card.Footer>
)}
</Card>
);
}
export default memo(ServiceMetrics);

View File

@@ -21,10 +21,17 @@ function Services({
return (
<Sentry.ErrorBoundary fallback={<ErrorBoundaryFallback />}>
<div className="home-services-container">
<ServiceTraces
{isSpanMetricEnabled ? (
<ServiceMetrics
onUpdateChecklistDoneItem={onUpdateChecklistDoneItem}
loadingUserPreferences={loadingUserPreferences}
/>
) : (
<ServiceTraces
onUpdateChecklistDoneItem={onUpdateChecklistDoneItem}
loadingUserPreferences={loadingUserPreferences}
/>
)}
</div>
</Sentry.ErrorBoundary>
);

View File

@@ -481,6 +481,7 @@ export const apDexMetricsQueryBuilderQueries = ({
export const operationPerSec = ({
servicename,
tagFilterItems,
topLevelOperations,
}: OperationPerSecProps): QueryBuilderData => {
const autocompleteData: BaseAutocompleteData[] = [
{

View File

@@ -1,4 +1,7 @@
import logEvent from 'api/common/logEvent';
import getTopLevelOperations, {
ServiceDataProps,
} from 'api/metrics/getTopLevelOperations';
import { FeatureKeys } from 'constants/features';
import { QueryParams } from 'constants/query';
import { PANEL_TYPES } from 'constants/queryBuilder';
@@ -107,6 +110,21 @@ function Application(): JSX.Element {
// eslint-disable-next-line react-hooks/exhaustive-deps
}, []);
const {
data: topLevelOperations,
error: topLevelOperationsError,
isLoading: topLevelOperationsIsLoading,
isError: topLevelOperationsIsError,
} = useQuery<ServiceDataProps>({
queryKey: [servicename, minTime, maxTime],
queryFn: (): Promise<ServiceDataProps> =>
getTopLevelOperations({
service: servicename || '',
start: minTime,
end: maxTime,
}),
});
const selectedTraceTags: string = JSON.stringify(
convertRawQueriesToTraceSelectedTags(queries) || [],
);
@@ -119,6 +137,14 @@ function Application(): JSX.Element {
[queries],
);
const topLevelOperationsRoute = useMemo(
() =>
topLevelOperations
? defaultTo(topLevelOperations[servicename || ''], [])
: [],
[servicename, topLevelOperations],
);
const operationPerSecWidget = useMemo(
() =>
getWidgetQueryBuilder({

View File

@@ -0,0 +1,224 @@
/* eslint-disable */
//@ts-nocheck
import { Select, Space, Typography } from 'antd';
import Graph from 'components/Graph';
import { useEffect, useState } from 'react';
import { connect, useSelector } from 'react-redux';
import { withRouter } from 'react-router-dom';
import { GetService, getUsageData, UsageDataItem } from 'store/actions';
import { AppState } from 'store/reducers';
import { GlobalTime } from 'types/actions/globalTime';
import { GlobalReducer } from 'types/reducer/globalTime';
import MetricReducer from 'types/reducer/metrics';
import { isOnboardingSkipped } from 'utils/app';
import { Card } from './styles';
const { Option } = Select;
interface UsageExplorerProps {
usageData: UsageDataItem[];
getUsageData: (
minTime: number,
maxTime: number,
selectedInterval: number,
selectedService: string,
) => void;
getServicesList: ({
selectedTimeInterval,
}: {
selectedTimeInterval: GlobalReducer['selectedTime'];
}) => void;
globalTime: GlobalTime;
servicesList: servicesListItem[];
totalCount: number;
}
const timeDaysOptions = [
{ value: 30, label: 'Last 30 Days' },
{ value: 7, label: 'Last week' },
{ value: 1, label: 'Last day' },
];
const interval = [
{
value: 604800,
chartDivideMultiplier: 1,
label: 'Weekly',
applicableOn: [timeDaysOptions[0]],
},
{
value: 86400,
chartDivideMultiplier: 30,
label: 'Daily',
applicableOn: [timeDaysOptions[0], timeDaysOptions[1]],
},
{
value: 3600,
chartDivideMultiplier: 10,
label: 'Hours',
applicableOn: [timeDaysOptions[2], timeDaysOptions[1]],
},
];
function _UsageExplorer(props: UsageExplorerProps): JSX.Element {
const [selectedTime, setSelectedTime] = useState(timeDaysOptions[1]);
const [selectedInterval, setSelectedInterval] = useState(interval[2]);
const [selectedService, setSelectedService] = useState<string>('');
const { selectedTime: globalSelectedTime } = useSelector<
AppState,
GlobalReducer
>((state) => state.globalTime);
const {
getServicesList,
getUsageData,
globalTime,
totalCount,
usageData,
} = props;
const { services } = useSelector<AppState, MetricReducer>(
(state) => state.metrics,
);
useEffect(() => {
if (selectedTime && selectedInterval) {
const maxTime = new Date().getTime() * 1000000;
const minTime = maxTime - selectedTime.value * 24 * 3600000 * 1000000;
getUsageData(minTime, maxTime, selectedInterval.value, selectedService);
}
}, [selectedTime, selectedInterval, selectedService, getUsageData]);
useEffect(() => {
getServicesList({
selectedTimeInterval: globalSelectedTime,
});
}, [globalTime, getServicesList, globalSelectedTime]);
const data = {
labels: usageData.map((s) => new Date(s.timestamp / 1000000)),
datasets: [
{
label: 'Span Count',
data: usageData.map((s) => s.count),
backgroundColor: 'rgba(255, 99, 132, 0.2)',
borderColor: 'rgba(255, 99, 132, 1)',
borderWidth: 2,
},
],
};
return (
<>
<Space style={{ marginTop: 40, marginLeft: 20 }}>
<Space>
<Select
onSelect={(value): void => {
setSelectedTime(
timeDaysOptions.filter((item) => item.value === parseInt(value))[0],
);
}}
value={selectedTime.label}
>
{timeDaysOptions.map(({ value, label }) => (
<Option key={value} value={value}>
{label}
</Option>
))}
</Select>
</Space>
<Space>
<Select
onSelect={(value): void => {
setSelectedInterval(
interval.filter((item) => item.value === parseInt(value))[0],
);
}}
value={selectedInterval.label}
>
{interval
.filter((interval) => interval.applicableOn.includes(selectedTime))
.map((item) => (
<Option key={item.label} value={item.value}>
{item.label}
</Option>
))}
</Select>
</Space>
<Space>
<Select
onSelect={(value): void => {
setSelectedService(value);
}}
value={selectedService || 'All Services'}
>
<Option value="">All Services</Option>
{services?.map((service) => (
<Option key={service.serviceName} value={service.serviceName}>
{service.serviceName}
</Option>
))}
</Select>
</Space>
{isOnboardingSkipped() && totalCount === 0 ? (
<Space
style={{
width: '100%',
margin: '40px 0',
marginLeft: 20,
justifyContent: 'center',
}}
>
<Typography>
No spans found. Please add instrumentation (follow this
<a
href="https://signoz.io/docs/instrumentation/overview"
target="_blank"
style={{ marginLeft: 3 }}
rel="noreferrer"
>
guide
</a>
)
</Typography>
</Space>
) : (
<Space style={{ display: 'block', marginLeft: 20, width: 200 }}>
<Typography>{`Total count is ${totalCount}`}</Typography>
</Space>
)}
</Space>
<Card>
<Graph name="usage" data={data} type="bar" />
</Card>
</>
);
}
const mapStateToProps = (
state: AppState,
): {
totalCount: number;
globalTime: GlobalTime;
usageData: UsageDataItem[];
} => {
let totalCount = 0;
for (const item of state.usageDate) {
totalCount += item.count;
}
return {
totalCount,
usageData: state.usageDate,
globalTime: state.globalTime,
};
};
export const UsageExplorer = withRouter(
connect(mapStateToProps, {
getUsageData,
getServicesList: GetService,
})(_UsageExplorer),
);

View File

@@ -0,0 +1,7 @@
import { UsageExplorer } from './UsageExplorer';
function UsageExplorerContainer(): JSX.Element {
return <UsageExplorer />;
}
export default UsageExplorerContainer;

View File

@@ -0,0 +1,13 @@
import { Card as CardComponent } from 'antd';
import styled from 'styled-components';
export const Card = styled(CardComponent)`
&&& {
width: 90%;
margin-top: 2rem;
}
.ant-card-body {
height: 70vh;
}
`;

View File

@@ -2,3 +2,4 @@ export * from './global';
export * from './metrics';
export * from './serviceMap';
export * from './types';
export * from './usage';

View File

@@ -0,0 +1,34 @@
import GetLogs from 'api/logs/GetLogs';
import { Dispatch } from 'redux';
import AppActions from 'types/actions';
import { SET_LOADING, SET_LOGS } from 'types/actions/logs';
import { Props } from 'types/api/logs/getLogs';
export const getLogs = (
props: Props,
): ((dispatch: Dispatch<AppActions>) => void) => async (
dispatch,
): Promise<void> => {
dispatch({
type: SET_LOADING,
payload: true,
});
const response = await GetLogs(props);
if (response.payload)
dispatch({
type: SET_LOGS,
payload: response.payload,
});
else
dispatch({
type: SET_LOGS,
payload: [],
});
dispatch({
type: SET_LOADING,
payload: false,
});
};

View File

@@ -1,14 +1,17 @@
import { ServiceMapItemAction, ServiceMapLoading } from './serviceMap';
import { GetUsageDataAction } from './usage';
export enum ActionTypes {
updateTimeInterval = 'UPDATE_TIME_INTERVAL',
getServiceMapItems = 'GET_SERVICE_MAP_ITEMS',
getServices = 'GET_SERVICES',
getUsageData = 'GET_USAGE_DATE',
fetchTraces = 'FETCH_TRACES',
fetchTraceItem = 'FETCH_TRACE_ITEM',
serviceMapLoading = 'UPDATE_SERVICE_MAP_LOADING',
}
export type Action =
| GetUsageDataAction
| ServiceMapItemAction
| ServiceMapLoading;

View File

@@ -0,0 +1,34 @@
import api from 'api';
import { Dispatch } from 'redux';
import { toUTCEpoch } from 'utils/timeUtils';
import { ActionTypes } from './types';
export interface UsageDataItem {
timestamp: number;
count: number;
}
export interface GetUsageDataAction {
type: ActionTypes.getUsageData;
payload: UsageDataItem[];
}
export const getUsageData = (
minTime: number,
maxTime: number,
step: number,
service: string,
) => async (dispatch: Dispatch): Promise<void> => {
const requestString = `/usage?start=${toUTCEpoch(minTime)}&end=${toUTCEpoch(
maxTime,
)}&step=${step}&service=${service || ''}`;
// Step can only be a multiple of 3600
const response = await api.get<UsageDataItem[]>(requestString);
dispatch<GetUsageDataAction>({
type: ActionTypes.getUsageData,
payload: response.data,
// PNOTE - response.data in the axios response has the actual API response
});
};

View File

@@ -6,9 +6,11 @@ import { LogsReducer } from './logs';
import metricsReducers from './metric';
import { ServiceMapReducer } from './serviceMap';
import traceReducer from './trace';
import { usageDataReducer } from './usage';
const reducers = combineReducers({
traces: traceReducer,
usageDate: usageDataReducer,
globalTime: globalTimeReducer,
serviceMap: ServiceMapReducer,
app: appReducer,

View File

@@ -0,0 +1,14 @@
/* eslint-disable sonarjs/no-small-switch */
import { Action, ActionTypes, UsageDataItem } from 'store/actions';
export const usageDataReducer = (
state: UsageDataItem[] = [{ timestamp: 0, count: 0 }],
action: Action,
): UsageDataItem[] => {
switch (action.type) {
case ActionTypes.getUsageData:
return action.payload;
default:
return state;
}
};

View File

@@ -3214,6 +3214,11 @@
dependencies:
"@babel/runtime" "^7.13.10"
"@radix-ui/react-compose-refs@1.1.2":
version "1.1.2"
resolved "https://registry.yarnpkg.com/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz#a2c4c47af6337048ee78ff6dc0d090b390d2bb30"
integrity sha512-z4eqJvfiNnFMHIIvXP3CY57y2WJs5g2v3X0zm9mEJkrkNv4rDxu+sg9Jh8EkXyeqBkB7SOcboo9dMVqhyrACIg==
"@radix-ui/react-context@1.0.1":
version "1.0.1"
resolved "https://registry.yarnpkg.com/@radix-ui/react-context/-/react-context-1.0.1.tgz#fe46e67c96b240de59187dcb7a1a50ce3e2ec00c"
@@ -3240,6 +3245,11 @@
"@radix-ui/react-use-callback-ref" "1.0.1"
"@radix-ui/react-use-escape-keydown" "1.0.3"
"@radix-ui/react-icons@^1.3.0":
version "1.3.2"
resolved "https://registry.yarnpkg.com/@radix-ui/react-icons/-/react-icons-1.3.2.tgz#09be63d178262181aeca5fb7f7bc944b10a7f441"
integrity sha512-fyQIhGDhzfc9pK2kH6Pl9c4BDJGfMkPqkyIgYDthyNYoNg3wVhoJMMh19WS4Up/1KMPFVpNsT2q3WmXn2N1m6g==
"@radix-ui/react-id@1.0.1":
version "1.0.1"
resolved "https://registry.yarnpkg.com/@radix-ui/react-id/-/react-id-1.0.1.tgz#73cdc181f650e4df24f0b6a5b7aa426b912c88c0"
@@ -3314,6 +3324,13 @@
"@babel/runtime" "^7.13.10"
"@radix-ui/react-compose-refs" "1.0.1"
"@radix-ui/react-slot@^1.1.0":
version "1.2.0"
resolved "https://registry.yarnpkg.com/@radix-ui/react-slot/-/react-slot-1.2.0.tgz#57727fc186ddb40724ccfbe294e1a351d92462ba"
integrity sha512-ujc+V6r0HNDviYqIK3rW4ffgYiZ8g5DEHrGJVk4x7kTlLXRDILnKX9vAUYeIsLOoDpDJ0ujpqMkjH4w2ofuo6w==
dependencies:
"@radix-ui/react-compose-refs" "1.1.2"
"@radix-ui/react-tabs@1.0.4":
version "1.0.4"
resolved "https://registry.yarnpkg.com/@radix-ui/react-tabs/-/react-tabs-1.0.4.tgz#993608eec55a5d1deddd446fa9978d2bc1053da2"
@@ -3654,6 +3671,20 @@
unplugin "1.0.1"
uuid "^9.0.0"
"@signozhq/button@0.0.0":
version "0.0.0"
resolved "https://registry.yarnpkg.com/@signozhq/button/-/button-0.0.0.tgz#802b1287acc2f6a09bf7fb05d9f4908a32a06373"
integrity sha512-U+IYj0eGqCxwofhE7Wn5Am5puGh8GQr8xDJGEy1lPPc58lOsE6w9e/bNW2efvATorTG/1FrizxaVz/x6eBpACQ==
dependencies:
"@radix-ui/react-icons" "^1.3.0"
"@radix-ui/react-slot" "^1.1.0"
class-variance-authority "^0.7.0"
clsx "^2.1.1"
lucide-react "^0.445.0"
react "^18.2.0"
tailwind-merge "^2.5.2"
tailwindcss-animate "^1.0.7"
"@signozhq/design-tokens@1.1.4":
version "1.1.4"
resolved "https://registry.yarnpkg.com/@signozhq/design-tokens/-/design-tokens-1.1.4.tgz#5d5de5bd9d19b6a3631383db015cc4b70c3f7661"
@@ -6495,6 +6526,13 @@ cjs-module-lexer@^1.0.0:
resolved "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.2.2.tgz"
integrity sha512-cOU9usZw8/dXIXKtwa8pM0OTJQuJkxMN6w30csNRUerHfeQ5R6U3kkU/FtJeIf3M202OHfY2U8ccInBG7/xogA==
class-variance-authority@^0.7.0:
version "0.7.1"
resolved "https://registry.yarnpkg.com/class-variance-authority/-/class-variance-authority-0.7.1.tgz#4008a798a0e4553a781a57ac5177c9fb5d043787"
integrity sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==
dependencies:
clsx "^2.1.1"
classnames@2.3.2, classnames@2.x, classnames@^2.2.1, classnames@^2.2.3, classnames@^2.2.5, classnames@^2.2.6, classnames@^2.3.1, classnames@^2.3.2:
version "2.3.2"
resolved "https://registry.npmjs.org/classnames/-/classnames-2.3.2.tgz"
@@ -6589,6 +6627,11 @@ clsx@^1.1.1:
resolved "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz"
integrity sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==
clsx@^2.1.1:
version "2.1.1"
resolved "https://registry.yarnpkg.com/clsx/-/clsx-2.1.1.tgz#eed397c9fd8bd882bfb18deab7102049a2f32999"
integrity sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==
co@^4.6.0:
version "4.6.0"
resolved "https://registry.npmjs.org/co/-/co-4.6.0.tgz"
@@ -11883,6 +11926,11 @@ lucide-react@0.427.0:
resolved "https://registry.yarnpkg.com/lucide-react/-/lucide-react-0.427.0.tgz#e06974514bbd591049f9d736b3d3ae99d4ede8c9"
integrity sha512-lv9s6c5BDF/ccuA0EgTdskTxIe11qpwBDmzRZHJAKtp8LTewAvDvOM+pTES9IpbBuTqkjiMhOmGpJ/CB+mKjFw==
lucide-react@^0.445.0:
version "0.445.0"
resolved "https://registry.yarnpkg.com/lucide-react/-/lucide-react-0.445.0.tgz#35c42341e98fbf0475b2a6cf74fd25ef7cbfcd62"
integrity sha512-YrLf3aAHvmd4dZ8ot+mMdNFrFpJD7YRwQ2pUcBhgqbmxtrMP4xDzIorcj+8y+6kpuXBF4JB0NOCTUWIYetJjgA==
lz-string@^1.4.4:
version "1.5.0"
resolved "https://registry.npmjs.org/lz-string/-/lz-string-1.5.0.tgz"
@@ -15160,6 +15208,13 @@ react@18.2.0:
dependencies:
loose-envify "^1.1.0"
react@^18.2.0:
version "18.3.1"
resolved "https://registry.yarnpkg.com/react/-/react-18.3.1.tgz#49ab892009c53933625bd16b2533fc754cab2891"
integrity sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==
dependencies:
loose-envify "^1.1.0"
read-pkg-up@^7.0.1:
version "7.0.1"
resolved "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-7.0.1.tgz"
@@ -16655,6 +16710,16 @@ table@^6.0.9:
string-width "^4.2.3"
strip-ansi "^6.0.1"
tailwind-merge@^2.5.2:
version "2.6.0"
resolved "https://registry.yarnpkg.com/tailwind-merge/-/tailwind-merge-2.6.0.tgz#ac5fb7e227910c038d458f396b7400d93a3142d5"
integrity sha512-P+Vu1qXfzediirmHOC3xKGAYeZtPcV9g76X+xg2FD4tYgR71ewMA35Y3sCz3zhiN/dwefRpJX0yBcgwi1fXNQA==
tailwindcss-animate@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/tailwindcss-animate/-/tailwindcss-animate-1.0.7.tgz#318b692c4c42676cc9e67b19b78775742388bef4"
integrity sha512-bl6mpH3T7I3UFxuvDEXLxy/VuFxBk5bbzplh7tXI68mwMokNYd1t9qPBHlnyTwfa4JGC4zP516I1hYYtQ/vspA==
tapable@^2.0.0, tapable@^2.1.1, tapable@^2.2.0:
version "2.2.1"
resolved "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz"

View File

@@ -17,6 +17,8 @@ const (
const (
defaultTraceDB string = "signoz_traces"
defaultOperationsTable string = "distributed_signoz_operations"
defaultIndexTable string = "distributed_signoz_index_v2"
defaultLocalIndexTable string = "signoz_index_v2"
defaultErrorTable string = "distributed_signoz_error_index_v2"
defaultDurationTable string = "distributed_durationSort"
@@ -57,10 +59,19 @@ type namespaceConfig struct {
Enabled bool
Datasource string
TraceDB string
ErrorTable string
OperationsTable string
IndexTable string
LocalIndexTable string
DurationTable string
UsageExplorerTable string
SpansTable string
ErrorTable string
SpanAttributeTableV2 string
SpanAttributeKeysTable string
DependencyGraphTable string
TopLevelOperationsTable string
LogsDB string
LogsTable string
LogsLocalTable string
LogsAttributeKeysTable string
LogsResourceKeysTable string
@@ -71,7 +82,6 @@ type namespaceConfig struct {
Encoding Encoding
Connector Connector
LogsDB string
LogsLocalTableV2 string
LogsTableV2 string
LogsResourceLocalTableV2 string

File diff suppressed because it is too large

View File

@@ -49,6 +49,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/app/querier"
querierV2 "github.com/SigNoz/signoz/pkg/query-service/app/querier/v2"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
"github.com/SigNoz/signoz/pkg/query-service/auth"
"github.com/SigNoz/signoz/pkg/query-service/cache"
@@ -117,6 +118,9 @@ type APIHandler struct {
// Websocket connection upgrader
Upgrader *websocket.Upgrader
UseLogsNewSchema bool
UseTraceNewSchema bool
hostsRepo *inframetrics.HostsRepo
processesRepo *inframetrics.ProcessesRepo
podsRepo *inframetrics.PodsRepo
@@ -173,6 +177,11 @@ type APIHandlerOpts struct {
// Querier Flux Interval
FluxInterval time.Duration
// Use Logs New schema
UseLogsNewSchema bool
UseTraceNewSchema bool
JWT *authtypes.JWT
AlertmanagerAPI *alertmanager.API
@@ -185,17 +194,21 @@ type APIHandlerOpts struct {
// NewAPIHandler returns an APIHandler
func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
querierOpts := querier.QuerierOptions{
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
}
querierOptsV2 := querierV2.QuerierOptions{
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
Reader: opts.Reader,
Cache: opts.Cache,
KeyGenerator: queryBuilder.NewKeyGenerator(),
FluxInterval: opts.FluxInterval,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
}
querier := querier.NewQuerier(querierOpts)
@@ -226,6 +239,8 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
LogsParsingPipelineController: opts.LogsParsingPipelineController,
querier: querier,
querierV2: querierv2,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
hostsRepo: hostsRepo,
processesRepo: processesRepo,
podsRepo: podsRepo,
@@ -244,8 +259,15 @@ func NewAPIHandler(opts APIHandlerOpts) (*APIHandler, error) {
FieldsAPI: opts.FieldsAPI,
}
logsQueryBuilder := logsv4.PrepareLogsQuery
tracesQueryBuilder := tracesV4.PrepareTracesQuery
logsQueryBuilder := logsv3.PrepareLogsQuery
if opts.UseLogsNewSchema {
logsQueryBuilder = logsv4.PrepareLogsQuery
}
tracesQueryBuilder := tracesV3.PrepareTracesQuery
if opts.UseTraceNewSchema {
tracesQueryBuilder = tracesV4.PrepareTracesQuery
}
builderOpts := queryBuilder.QueryBuilderOptions{
BuildMetricQuery: metricsv3.PrepareMetricQuery,
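
This reintroduced gating is the recurring pattern of the whole change set: keep the v3 prepare function as the default and switch to v4 only when the corresponding schema flag is set. A minimal standalone sketch of the idea, with stand-in functions replacing the real logsv3/logsv4 builders; only the selection logic mirrors the handler code above:

package main

import "fmt"

// Stand-ins for the real builders in pkg/query-service/app/logs/v3 and /v4.
func prepareLogsQueryV3() string { return "logs v3 query" }
func prepareLogsQueryV4() string { return "logs v4 query" }

func main() {
	useLogsNewSchema := false // mirrors the -use-logs-new-schema flag

	// Default to the old-schema builder; upgrade only when the flag is set.
	logsQueryBuilder := prepareLogsQueryV3
	if useLogsNewSchema {
		logsQueryBuilder = prepareLogsQueryV4
	}
	fmt.Println(logsQueryBuilder())
}

The same two-step assignment appears for tracesV3/tracesV4 here and again in both querier packages below.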
@@ -528,8 +550,12 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
// router.HandleFunc("/api/v1/get_percentiles", aH.getApplicationPercentiles).Methods(http.MethodGet)
router.HandleFunc("/api/v1/services", am.ViewAccess(aH.getServices)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/services/list", am.ViewAccess(aH.getServicesList)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/service/top_operations", am.ViewAccess(aH.getTopOperations)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/service/top_level_operations", am.ViewAccess(aH.getServicesTopLevelOps)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/traces/{traceId}", am.ViewAccess(aH.SearchTraces)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/usage", am.ViewAccess(aH.getUsage)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/dependency_graph", am.ViewAccess(aH.dependencyGraph)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/settings/ttl", am.AdminAccess(aH.setTTL)).Methods(http.MethodPost)
router.HandleFunc("/api/v1/settings/ttl", am.ViewAccess(aH.getTTL)).Methods(http.MethodGet)
router.HandleFunc("/api/v1/settings/apdex", am.AdminAccess(aH.setApdexSettings)).Methods(http.MethodPost)
@@ -1598,13 +1624,122 @@ func (aH *APIHandler) getTopOperations(w http.ResponseWriter, r *http.Request) {
}
func (aH *APIHandler) getUsage(w http.ResponseWriter, r *http.Request) {
query, err := parseGetUsageRequest(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}
result, err := aH.reader.GetUsage(r.Context(), query)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}
aH.WriteJSON(w, r, result)
}
func (aH *APIHandler) getServicesTopLevelOps(w http.ResponseWriter, r *http.Request) {
var start, end time.Time
var services []string
type topLevelOpsParams struct {
Service string `json:"service"`
Start string `json:"start"`
End string `json:"end"`
}
var params topLevelOpsParams
err := json.NewDecoder(r.Body).Decode(&params)
if err != nil {
zap.L().Error("Error in getting req body for get top operations API", zap.Error(err))
}
if params.Service != "" {
services = []string{params.Service}
}
startEpoch := params.Start
if startEpoch != "" {
startEpochInt, err := strconv.ParseInt(startEpoch, 10, 64)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading start time")
return
}
start = time.Unix(0, startEpochInt)
}
endEpoch := params.End
if endEpoch != "" {
endEpochInt, err := strconv.ParseInt(endEpoch, 10, 64)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, "Error reading end time")
return
}
end = time.Unix(0, endEpochInt)
}
result, apiErr := aH.reader.GetTopLevelOperations(r.Context(), start, end, services)
if apiErr != nil {
RespondError(w, apiErr, nil)
return
}
aH.WriteJSON(w, r, result)
}
func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) {
query, err := parseGetServicesRequest(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}
result, apiErr := aH.reader.GetServices(r.Context(), query)
if apiErr != nil && aH.HandleError(w, apiErr.Err, http.StatusInternalServerError) {
return
}
data := map[string]interface{}{
"number": len(*result),
}
claims, errv2 := authtypes.ClaimsFromContext(r.Context())
if errv2 == nil {
telemetry.GetInstance().SendEvent(telemetry.TELEMETRY_EVENT_NUMBER_OF_SERVICES, data, claims.Email, true, false)
}
if (data["number"] != 0) && (data["number"] != telemetry.DEFAULT_NUMBER_OF_SERVICES) {
telemetry.GetInstance().AddActiveTracesUser()
}
aH.WriteJSON(w, r, result)
}
func (aH *APIHandler) dependencyGraph(w http.ResponseWriter, r *http.Request) {
query, err := parseGetServicesRequest(r)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}
result, err := aH.reader.GetDependencyGraph(r.Context(), query)
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}
aH.WriteJSON(w, r, result)
}
func (aH *APIHandler) getServicesList(w http.ResponseWriter, r *http.Request) {
result, err := aH.reader.GetServicesList(r.Context())
if aH.HandleError(w, err, http.StatusBadRequest) {
return
}
aH.WriteJSON(w, r, result)
}
func (aH *APIHandler) SearchTraces(w http.ResponseWriter, r *http.Request) {
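
For reference, the restored top_level_operations endpoint takes an optional service name plus start/end epoch-nanosecond values encoded as strings (the handler parses them with strconv.ParseInt and time.Unix(0, n)). A hedged client sketch; the host, port, and missing auth header are assumptions, only the route and body shape come from the handler above:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// start and end are epoch nanoseconds as strings; service is optional.
	body := []byte(`{"service":"frontend","start":"1745700000000000000","end":"1745786400000000000"}`)

	resp, err := http.Post(
		"http://localhost:8080/api/v1/service/top_level_operations",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // body: map of service name -> top-level operations
}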
@@ -4104,8 +4239,11 @@ func (aH *APIHandler) CloudIntegrationsUpdateServiceConfig(
// logs
func (aH *APIHandler) RegisterLogsRoutes(router *mux.Router, am *middleware.AuthZ) {
subRouter := router.PathPrefix("/api/v1/logs").Subrouter()
subRouter.HandleFunc("", am.ViewAccess(aH.getLogs)).Methods(http.MethodGet)
subRouter.HandleFunc("/tail", am.ViewAccess(aH.tailLogs)).Methods(http.MethodGet)
subRouter.HandleFunc("/fields", am.ViewAccess(aH.logFields)).Methods(http.MethodGet)
subRouter.HandleFunc("/fields", am.EditAccess(aH.logFieldUpdate)).Methods(http.MethodPost)
subRouter.HandleFunc("/aggregate", am.ViewAccess(aH.logAggregate)).Methods(http.MethodGet)
// log pipelines
subRouter.HandleFunc("/pipelines/preview", am.ViewAccess(aH.PreviewLogsPipelinesHandler)).Methods(http.MethodPost)
@@ -4145,6 +4283,81 @@ func (aH *APIHandler) logFieldUpdate(w http.ResponseWriter, r *http.Request) {
aH.WriteJSON(w, r, field)
}
func (aH *APIHandler) getLogs(w http.ResponseWriter, r *http.Request) {
params, err := logs.ParseLogFilterParams(r)
if err != nil {
apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErr, "Incorrect params")
return
}
res, apiErr := aH.reader.GetLogs(r.Context(), params)
if apiErr != nil {
RespondError(w, apiErr, "Failed to fetch logs from the DB")
return
}
aH.WriteJSON(w, r, map[string]interface{}{"results": res})
}
func (aH *APIHandler) tailLogs(w http.ResponseWriter, r *http.Request) {
params, err := logs.ParseLogFilterParams(r)
if err != nil {
apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErr, "Incorrect params")
return
}
// create the client
client := &model.LogsTailClient{Name: r.RemoteAddr, Logs: make(chan *model.SignozLog, 1000), Done: make(chan *bool), Error: make(chan error), Filter: *params}
go aH.reader.TailLogs(r.Context(), client)
w.Header().Set("Connection", "keep-alive")
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(200)
flusher, ok := w.(http.Flusher)
if !ok {
err := model.ApiError{Typ: model.ErrorStreamingNotSupported, Err: nil}
RespondError(w, &err, "streaming is not supported")
return
}
// flush the headers
flusher.Flush()
for {
select {
case log := <-client.Logs:
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
enc.Encode(log)
fmt.Fprintf(w, "data: %v\n\n", buf.String())
flusher.Flush()
case <-client.Done:
zap.L().Debug("done!")
return
case err := <-client.Error:
zap.L().Error("error occured", zap.Error(err))
return
}
}
}
func (aH *APIHandler) logAggregate(w http.ResponseWriter, r *http.Request) {
params, err := logs.ParseLogAggregateParams(r)
if err != nil {
apiErr := &model.ApiError{Typ: model.ErrorBadData, Err: err}
RespondError(w, apiErr, "Incorrect params")
return
}
res, apiErr := aH.reader.AggregateLogs(r.Context(), params)
if apiErr != nil {
RespondError(w, apiErr, "Failed to fetch logs aggregate from the DB")
return
}
aH.WriteJSON(w, r, res)
}
const logPipelines = "log_pipelines"
func parseAgentConfigVersion(r *http.Request) (int, *model.ApiError) {
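
The restored /tail route streams server-sent events, one "data: <json>\n\n" frame per log record, as the tailLogs handler above shows. A client sketch under stated assumptions: host, port, and the bearer token are hypothetical, and the filter query params are omitted:

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/api/v1/logs/tail", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <token>") // token is an assumption

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() { // one "data: ..." frame per log record
		if line := scanner.Text(); strings.HasPrefix(line, "data: ") {
			fmt.Println(strings.TrimPrefix(line, "data: "))
		}
	}
}

This is the same stream the frontend LiveTail helper (EventSourcePolyfill, earlier in this diff) consumes.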
@@ -4626,10 +4839,30 @@ func (aH *APIHandler) queryRangeV3(ctx context.Context, queryRangeParams *v3.Que
RespondError(w, apiErrObj, errQuriesByName)
return
}
tracesV4.Enrich(queryRangeParams, spanKeys)
if aH.UseTraceNewSchema {
tracesV4.Enrich(queryRangeParams, spanKeys)
} else {
tracesV3.Enrich(queryRangeParams, spanKeys)
}
}
// WARN: Only works for AND operator in traces query
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
// check if traceID is used as a filter (with an equal/similar operator) in the traces query; if yes, add a timestamp filter to the queryRange params
isUsed, traceIDs := tracesV3.TraceIdFilterUsedWithEqual(queryRangeParams)
if isUsed && len(traceIDs) > 0 {
zap.L().Debug("traceID used as filter in traces query")
// query signoz_spans table with traceID to get min and max timestamp
min, max, err := aH.reader.GetMinAndMaxTimestampForTraceID(ctx, traceIDs)
if err == nil {
// add timestamp filter to queryRange params
tracesV3.AddTimestampFilters(min, max, queryRangeParams)
zap.L().Debug("post adding timestamp filter in traces query", zap.Any("queryRangeParams", queryRangeParams))
}
}
}
// Hook up query progress tracking if requested
queryIdHeader := r.Header.Get("X-SIGNOZ-QUERY-ID")
if len(queryIdHeader) > 0 {
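
The block above adds a fast path: when a traces query filters on a traceID with an equals-style operator, the handler looks up that trace's min and max span timestamps and narrows the query window before execution. A toy sketch of the narrowing step, with a hypothetical struct standing in for v3.QueryRangeParamsV3; it mirrors the intent of tracesV3.AddTimestampFilters, not its exact implementation:

package main

import "fmt"

// Toy stand-in for v3.QueryRangeParamsV3; only Start and End matter here.
type rangeParams struct{ Start, End int64 }

// Clamp the query window to the span timestamps found for the traceIDs.
func addTimestampFilters(min, max int64, p *rangeParams) {
	if min > p.Start {
		p.Start = min
	}
	if max < p.End {
		p.End = max
	}
}

func main() {
	p := &rangeParams{Start: 0, End: 1_000_000}
	// min and max would come from reader.GetMinAndMaxTimestampForTraceID.
	addTimestampFilters(500, 900, p)
	fmt.Println(p.Start, p.End) // 500 900
}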
@@ -4969,7 +5202,88 @@ func (aH *APIHandler) liveTailLogsV2(w http.ResponseWriter, r *http.Request) {
}
func (aH *APIHandler) liveTailLogs(w http.ResponseWriter, r *http.Request) {
aH.liveTailLogsV2(w, r)
if aH.UseLogsNewSchema {
aH.liveTailLogsV2(w, r)
return
}
// get the param from url and add it to body
stringReader := strings.NewReader(r.URL.Query().Get("q"))
r.Body = io.NopCloser(stringReader)
queryRangeParams, apiErrorObj := ParseQueryRangeParams(r)
if apiErrorObj != nil {
zap.L().Error(apiErrorObj.Err.Error())
RespondError(w, apiErrorObj, nil)
return
}
var err error
var queryString string
switch queryRangeParams.CompositeQuery.QueryType {
case v3.QueryTypeBuilder:
// check if any enrichment is required for logs if yes then enrich them
if logsv3.EnrichmentRequired(queryRangeParams) {
logsFields, err := aH.reader.GetLogFields(r.Context())
if err != nil {
apiErrObj := &model.ApiError{Typ: model.ErrorInternal, Err: err}
RespondError(w, apiErrObj, nil)
return
}
// get the fields if any logs query is present
fields := model.GetLogFieldsV3(r.Context(), queryRangeParams, logsFields)
logsv3.Enrich(queryRangeParams, fields)
}
queryString, err = aH.queryBuilder.PrepareLiveTailQuery(queryRangeParams)
if err != nil {
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
default:
err = fmt.Errorf("invalid query type")
RespondError(w, &model.ApiError{Typ: model.ErrorBadData, Err: err}, nil)
return
}
// create the client
client := &model.LogsLiveTailClient{Name: r.RemoteAddr, Logs: make(chan *model.SignozLog, 1000), Done: make(chan *bool), Error: make(chan error)}
go aH.reader.LiveTailLogsV3(r.Context(), queryString, uint64(queryRangeParams.Start), "", client)
w.Header().Set("Connection", "keep-alive")
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Access-Control-Allow-Origin", "*")
w.WriteHeader(200)
flusher, ok := w.(http.Flusher)
if !ok {
err := model.ApiError{Typ: model.ErrorStreamingNotSupported, Err: nil}
RespondError(w, &err, "streaming is not supported")
return
}
// flush the headers
flusher.Flush()
for {
select {
case log := <-client.Logs:
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
enc.Encode(log)
fmt.Fprintf(w, "data: %v\n\n", buf.String())
flusher.Flush()
case <-client.Done:
zap.L().Debug("done!")
return
case err := <-client.Error:
zap.L().Error("error occurred", zap.Error(err))
fmt.Fprintf(w, "event: error\ndata: %v\n\n", err.Error())
flusher.Flush()
return
}
}
}
func (aH *APIHandler) getMetricMetadata(w http.ResponseWriter, r *http.Request) {
@@ -5010,7 +5324,27 @@ func (aH *APIHandler) queryRangeV4(ctx context.Context, queryRangeParams *v3.Que
RespondError(w, apiErrObj, errQuriesByName)
return
}
tracesV4.Enrich(queryRangeParams, spanKeys)
if aH.UseTraceNewSchema {
tracesV4.Enrich(queryRangeParams, spanKeys)
} else {
tracesV3.Enrich(queryRangeParams, spanKeys)
}
}
// WARN: Only works for AND operator in traces query
if queryRangeParams.CompositeQuery.QueryType == v3.QueryTypeBuilder {
// check if traceID is used as a filter (with an equal/similar operator) in the traces query; if yes, add a timestamp filter to the queryRange params
isUsed, traceIDs := tracesV3.TraceIdFilterUsedWithEqual(queryRangeParams)
if isUsed && len(traceIDs) > 0 {
zap.L().Debug("traceID used as filter in traces query")
// query signoz_spans table with traceID to get min and max timestamp
min, max, err := aH.reader.GetMinAndMaxTimestampForTraceID(ctx, traceIDs)
if err == nil {
// add timestamp filter to queryRange params
tracesV3.AddTimestampFilters(min, max, queryRangeParams)
zap.L().Debug("post adding timestamp filter in traces query", zap.Any("queryRangeParams", queryRangeParams))
}
}
}
result, errQuriesByName, err = aH.querierV2.QueryRange(ctx, queryRangeParams)

View File

@@ -171,6 +171,42 @@ func parseQueryRangeRequest(r *http.Request) (*model.QueryRangeParams, *model.Ap
return &queryRangeParams, nil
}
func parseGetUsageRequest(r *http.Request) (*model.GetUsageParams, error) {
startTime, err := parseTime("start", r)
if err != nil {
return nil, err
}
endTime, err := parseTime("end", r)
if err != nil {
return nil, err
}
stepStr := r.URL.Query().Get("step")
if len(stepStr) == 0 {
return nil, errors.New("step param missing in query")
}
stepInt, err := strconv.Atoi(stepStr)
if err != nil {
return nil, errors.New("step param is not in correct format")
}
serviceName := r.URL.Query().Get("service")
stepHour := stepInt / 3600
getUsageParams := model.GetUsageParams{
StartTime: startTime.Format(time.RFC3339Nano),
EndTime: endTime.Format(time.RFC3339Nano),
Start: startTime,
End: endTime,
ServiceName: serviceName,
Period: fmt.Sprintf("PT%dH", stepHour),
StepHour: stepHour,
}
return &getUsageParams, nil
}
func parseGetServicesRequest(r *http.Request) (*model.GetServicesParams, error) {
var postData *model.GetServicesParams
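
Per the restored parser above, /api/v1/usage requires start, end, and step query params, with step converted to whole hours (the frontend action earlier in this diff notes step should be a multiple of 3600). A client sketch; host, port, and the exact timestamp format are assumptions:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("start", "1745700000000000000") // epoch timestamp; exact format assumed
	q.Set("end", "1745786400000000000")
	q.Set("step", "3600") // required; the handler converts it to whole hours
	q.Set("service", "")  // empty string means all services

	resp, err := http.Get("http://localhost:8080/api/v1/usage?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // usage items with timestamp and span count
}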

View File

@@ -6,8 +6,10 @@ import (
"strings"
"sync"
logsV3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
logsV4 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v4"
metricsV3 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v3"
tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
"github.com/SigNoz/signoz/pkg/query-service/common"
"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -17,15 +19,19 @@ import (
"go.uber.org/zap"
)
func prepareLogsQuery(
_ context.Context,
func prepareLogsQuery(_ context.Context,
useLogsNewSchema bool,
start,
end int64,
builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3,
) (string, error) {
query := ""
logsQueryBuilder := logsV4.PrepareLogsQuery
logsQueryBuilder := logsV3.PrepareLogsQuery
if useLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
if params == nil || builderQuery == nil {
return query, fmt.Errorf("params and builderQuery cannot be nil")
@@ -96,7 +102,7 @@ func (q *querier) runBuilderQuery(
var err error
if _, ok := cacheKeys[queryName]; !ok || params.NoCache {
zap.L().Info("skipping cache for logs query", zap.String("queryName", queryName), zap.Int64("start", start), zap.Int64("end", end), zap.Int64("step", builderQuery.StepInterval), zap.Bool("noCache", params.NoCache), zap.String("cacheKey", cacheKeys[queryName]))
query, err = prepareLogsQuery(ctx, start, end, builderQuery, params)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -111,7 +117,7 @@ func (q *querier) runBuilderQuery(
missedSeries := make([]querycache.CachedSeriesData, 0)
filteredMissedSeries := make([]querycache.CachedSeriesData, 0)
for _, miss := range misses {
query, err = prepareLogsQuery(ctx, miss.Start, miss.End, builderQuery, params)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.Start, miss.End, builderQuery, params)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -163,7 +169,11 @@ func (q *querier) runBuilderQuery(
}
if builderQuery.DataSource == v3.DataSourceTraces {
tracesQueryBuilder := tracesV4.PrepareTracesQuery
tracesQueryBuilder := tracesV3.PrepareTracesQuery
if q.UseTraceNewSchema {
tracesQueryBuilder = tracesV4.PrepareTracesQuery
}
var query string
var err error

View File

@@ -6,9 +6,11 @@ import (
"sync"
"time"
logsV3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
logsV4 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v4"
metricsV3 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v3"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
"github.com/SigNoz/signoz/pkg/query-service/common"
"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -50,6 +52,9 @@ type querier struct {
timeRanges [][]int
returnedSeries []*v3.Series
returnedErr error
UseLogsNewSchema bool
UseTraceNewSchema bool
}
type QuerierOptions struct {
@@ -59,14 +64,22 @@ type QuerierOptions struct {
FluxInterval time.Duration
// used for testing
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
UseLogsNewSchema bool
UseTraceNewSchema bool
}
func NewQuerier(opts QuerierOptions) interfaces.Querier {
logsQueryBuilder := logsV4.PrepareLogsQuery
tracesQueryBuilder := tracesV4.PrepareTracesQuery
logsQueryBuilder := logsV3.PrepareLogsQuery
if opts.UseLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
tracesQueryBuilder := tracesV3.PrepareTracesQuery
if opts.UseTraceNewSchema {
tracesQueryBuilder = tracesV4.PrepareTracesQuery
}
qc := querycache.NewQueryCache(querycache.WithCache(opts.Cache), querycache.WithFluxInterval(opts.FluxInterval))
@@ -83,9 +96,11 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
BuildMetricQuery: metricsV3.PrepareMetricQuery,
}),
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
}
}
@@ -430,6 +445,11 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
len(params.CompositeQuery.BuilderQueries) == 1 &&
params.CompositeQuery.PanelType != v3.PanelTypeTrace {
for _, v := range params.CompositeQuery.BuilderQueries {
if (v.DataSource == v3.DataSourceLogs && !q.UseLogsNewSchema) ||
(v.DataSource == v3.DataSourceTraces && !q.UseTraceNewSchema) {
break
}
// only allow logs queries with timestamp ordering desc
// TODO(nitya): allow for timestamp asc
if (v.DataSource == v3.DataSourceLogs || v.DataSource == v3.DataSourceTraces) &&
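
The guard added at the top of this loop opts old-schema log and trace sources out of the window-based list-query optimization, falling back to the plain list path. A minimal sketch of that decision in isolation, with hypothetical names; only the boolean condition mirrors the diff:

package main

import "fmt"

type dataSource string

const (
	sourceLogs   dataSource = "logs"
	sourceTraces dataSource = "traces"
)

// Old-schema logs or traces queries skip the window-based optimization.
func useWindowBasedListQuery(src dataSource, useLogsNew, useTraceNew bool) bool {
	if (src == sourceLogs && !useLogsNew) || (src == sourceTraces && !useTraceNew) {
		return false
	}
	return true
}

func main() {
	fmt.Println(useWindowBasedListQuery(sourceLogs, false, true))  // false: old logs schema
	fmt.Println(useWindowBasedListQuery(sourceTraces, true, true)) // true: new trace schema
}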

View File

@@ -1370,6 +1370,8 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
telemetryStore,
prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}),
"",
true,
true,
time.Duration(time.Second),
nil,
)

View File

@@ -6,9 +6,11 @@ import (
"strings"
"sync"
logsV3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
logsV4 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v4"
metricsV3 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v3"
metricsV4 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v4"
tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
"github.com/SigNoz/signoz/pkg/query-service/common"
"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -17,14 +19,17 @@ import (
"go.uber.org/zap"
)
func prepareLogsQuery(
_ context.Context,
func prepareLogsQuery(_ context.Context,
useLogsNewSchema bool,
start,
end int64,
builderQuery *v3.BuilderQuery,
params *v3.QueryRangeParamsV3,
) (string, error) {
logsQueryBuilder := logsV4.PrepareLogsQuery
logsQueryBuilder := logsV3.PrepareLogsQuery
if useLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
query := ""
if params == nil || builderQuery == nil {
@@ -97,7 +102,7 @@ func (q *querier) runBuilderQuery(
var err error
if _, ok := cacheKeys[queryName]; !ok || params.NoCache {
zap.L().Info("skipping cache for logs query", zap.String("queryName", queryName), zap.Int64("start", params.Start), zap.Int64("end", params.End), zap.Int64("step", params.Step), zap.Bool("noCache", params.NoCache), zap.String("cacheKey", cacheKeys[queryName]))
query, err = prepareLogsQuery(ctx, start, end, builderQuery, params)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, start, end, builderQuery, params)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -111,7 +116,7 @@ func (q *querier) runBuilderQuery(
missedSeries := make([]querycache.CachedSeriesData, 0)
filteredMissedSeries := make([]querycache.CachedSeriesData, 0)
for _, miss := range misses {
query, err = prepareLogsQuery(ctx, miss.Start, miss.End, builderQuery, params)
query, err = prepareLogsQuery(ctx, q.UseLogsNewSchema, miss.Start, miss.End, builderQuery, params)
if err != nil {
ch <- channelResult{Err: err, Name: queryName, Query: query, Series: nil}
return
@@ -164,7 +169,11 @@ func (q *querier) runBuilderQuery(
}
if builderQuery.DataSource == v3.DataSourceTraces {
tracesQueryBuilder := tracesV4.PrepareTracesQuery
tracesQueryBuilder := tracesV3.PrepareTracesQuery
if q.UseTraceNewSchema {
tracesQueryBuilder = tracesV4.PrepareTracesQuery
}
var query string
var err error

View File

@@ -6,9 +6,11 @@ import (
"sync"
"time"
logsV3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
logsV4 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v4"
metricsV4 "github.com/SigNoz/signoz/pkg/query-service/app/metrics/v4"
"github.com/SigNoz/signoz/pkg/query-service/app/queryBuilder"
tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
"github.com/SigNoz/signoz/pkg/query-service/common"
"github.com/SigNoz/signoz/pkg/query-service/constants"
@@ -47,9 +49,11 @@ type querier struct {
testingMode bool
queriesExecuted []string
// tuple of start and end time in milliseconds
timeRanges [][]int
returnedSeries []*v3.Series
returnedErr error
timeRanges [][]int
returnedSeries []*v3.Series
returnedErr error
UseLogsNewSchema bool
UseTraceNewSchema bool
}
type QuerierOptions struct {
@@ -59,14 +63,23 @@ type QuerierOptions struct {
FluxInterval time.Duration
// used for testing
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
TestingMode bool
ReturnedSeries []*v3.Series
ReturnedErr error
UseLogsNewSchema bool
UseTraceNewSchema bool
}
func NewQuerier(opts QuerierOptions) interfaces.Querier {
logsQueryBuilder := logsV4.PrepareLogsQuery
tracesQueryBuilder := tracesV4.PrepareTracesQuery
logsQueryBuilder := logsV3.PrepareLogsQuery
if opts.UseLogsNewSchema {
logsQueryBuilder = logsV4.PrepareLogsQuery
}
tracesQueryBuilder := tracesV3.PrepareTracesQuery
if opts.UseTraceNewSchema {
tracesQueryBuilder = tracesV4.PrepareTracesQuery
}
qc := querycache.NewQueryCache(querycache.WithCache(opts.Cache), querycache.WithFluxInterval(opts.FluxInterval))
@@ -83,9 +96,11 @@ func NewQuerier(opts QuerierOptions) interfaces.Querier {
BuildMetricQuery: metricsV4.PrepareMetricQuery,
}),
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
testingMode: opts.TestingMode,
returnedSeries: opts.ReturnedSeries,
returnedErr: opts.ReturnedErr,
UseLogsNewSchema: opts.UseLogsNewSchema,
UseTraceNewSchema: opts.UseTraceNewSchema,
}
}
@@ -431,6 +446,11 @@ func (q *querier) runBuilderListQueries(ctx context.Context, params *v3.QueryRan
len(params.CompositeQuery.BuilderQueries) == 1 &&
params.CompositeQuery.PanelType != v3.PanelTypeTrace {
for _, v := range params.CompositeQuery.BuilderQueries {
if (v.DataSource == v3.DataSourceLogs && !q.UseLogsNewSchema) ||
(v.DataSource == v3.DataSourceTraces && !q.UseTraceNewSchema) {
break
}
// only allow logs queries with timestamp ordering desc
// TODO(nitya): allow for timestamp asc
if (v.DataSource == v3.DataSourceLogs || v.DataSource == v3.DataSourceTraces) &&

View File

@@ -1424,6 +1424,8 @@ func Test_querier_runWindowBasedListQuery(t *testing.T) {
telemetryStore,
prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}),
"",
true,
true,
time.Duration(time.Second),
nil,
)

View File

@@ -53,6 +53,8 @@ type ServerOptions struct {
FluxInterval string
FluxIntervalForTraceDetail string
Cluster string
UseLogsNewSchema bool
UseTraceNewSchema bool
SigNoz *signoz.SigNoz
Jwt *authtypes.JWT
}
@@ -108,6 +110,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.SigNoz.TelemetryStore,
serverOptions.SigNoz.Prometheus,
serverOptions.Cluster,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
fluxIntervalForTraceDetail,
serverOptions.SigNoz.Cache,
)
@@ -125,6 +129,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
serverOptions.SigNoz.SQLStore.SQLxDB(),
reader,
c,
serverOptions.UseLogsNewSchema,
serverOptions.UseTraceNewSchema,
serverOptions.SigNoz.SQLStore,
serverOptions.SigNoz.TelemetryStore,
serverOptions.SigNoz.Prometheus,
@@ -167,6 +173,8 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
LogsParsingPipelineController: logParsingPipelineController,
Cache: c,
FluxInterval: fluxInterval,
UseLogsNewSchema: serverOptions.UseLogsNewSchema,
UseTraceNewSchema: serverOptions.UseTraceNewSchema,
JWT: serverOptions.Jwt,
AlertmanagerAPI: alertmanager.NewAPI(serverOptions.SigNoz.Alertmanager),
FieldsAPI: fields.NewAPI(serverOptions.SigNoz.TelemetryStore),
@@ -427,21 +435,25 @@ func makeRulesManager(
db *sqlx.DB,
ch interfaces.Reader,
cache cache.Cache,
useLogsNewSchema bool,
useTraceNewSchema bool,
sqlstore sqlstore.SQLStore,
telemetryStore telemetrystore.TelemetryStore,
prometheus prometheus.Prometheus,
) (*rules.Manager, error) {
// create manager opts
managerOpts := &rules.ManagerOptions{
TelemetryStore: telemetryStore,
Prometheus: prometheus,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
Reader: ch,
Cache: cache,
EvalDelay: constants.GetEvalDelay(),
SQLStore: sqlstore,
TelemetryStore: telemetryStore,
Prometheus: prometheus,
DBConn: db,
Context: context.Background(),
Logger: zap.L(),
Reader: ch,
Cache: cache,
EvalDelay: constants.GetEvalDelay(),
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
SQLStore: sqlstore,
}
// create Manager
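
As with the querier, the two booleans are copied verbatim from ServerOptions into every consumer (reader, rules manager, API handler) rather than re-read from flags. A hedged sketch of that plumbing, using trimmed-down illustrative copies of the option structs (the real ones carry many more fields):

package main

import "fmt"

// Trimmed-down, illustrative copies of the option structs touched above.
type ServerOptions struct {
	UseLogsNewSchema  bool
	UseTraceNewSchema bool
}

type ManagerOptions struct {
	UseLogsNewSchema  bool
	UseTraceNewSchema bool
}

// makeRulesManager-style wiring: the flags are copied once at startup, so
// every consumer sees the same schema decision for the process lifetime.
func newManagerOptions(so *ServerOptions) *ManagerOptions {
	return &ManagerOptions{
		UseLogsNewSchema:  so.UseLogsNewSchema,
		UseTraceNewSchema: so.UseTraceNewSchema,
	}
}

func main() {
	so := &ServerOptions{UseLogsNewSchema: true, UseTraceNewSchema: true}
	fmt.Printf("%+v\n", *newManagerOptions(so))
}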

View File

@@ -15,8 +15,12 @@ import (
type Reader interface {
GetInstantQueryMetricsResult(ctx context.Context, query *model.InstantQueryMetricsParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
GetQueryRangeResult(ctx context.Context, query *model.QueryRangeParams) (*promql.Result, *stats.QueryStats, *model.ApiError)
GetTopLevelOperations(ctx context.Context, start, end time.Time, services []string) (*map[string][]string, *model.ApiError)
GetServices(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceItem, *model.ApiError)
GetTopOperations(ctx context.Context, query *model.GetTopOperationsParams) (*[]model.TopOperationsItem, *model.ApiError)
GetUsage(ctx context.Context, query *model.GetUsageParams) (*[]model.UsageItem, error)
GetServicesList(ctx context.Context) (*[]string, error)
GetDependencyGraph(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error)
GetTTL(ctx context.Context, orgID string, ttlParams *model.GetTTLParams) (*model.GetTTLResponseItem, *model.ApiError)
@@ -70,6 +74,9 @@ type Reader interface {
// Logs
GetLogFields(ctx context.Context) (*model.GetFieldsResponse, *model.ApiError)
UpdateLogField(ctx context.Context, field *model.UpdateField) *model.ApiError
GetLogs(ctx context.Context, params *model.LogsFilterParams) (*[]model.SignozLog, *model.ApiError)
TailLogs(ctx context.Context, client *model.LogsTailClient)
AggregateLogs(ctx context.Context, params *model.LogsAggregateParams) (*model.GetLogsAggregatesResponse, *model.ApiError)
GetLogAttributeKeys(ctx context.Context, req *v3.FilterAttributeKeyRequest) (*v3.FilterAttributeKeyResponse, error)
GetLogAttributeValues(ctx context.Context, req *v3.FilterAttributeValueRequest) (*v3.FilterAttributeValueResponse, error)
GetLogAggregateAttributes(ctx context.Context, req *v3.AggregateAttributeRequest) (*v3.AggregateAttributeResponse, error)
@@ -93,6 +100,8 @@ type Reader interface {
ReadRuleStateHistoryTopContributorsByRuleID(ctx context.Context, ruleID string, params *model.QueryRuleStateHistory) ([]model.RuleStateHistoryContributor, error)
GetLastSavedRuleStateHistory(ctx context.Context, ruleID string) ([]model.RuleStateHistory, error)
GetMinAndMaxTimestampForTraceID(ctx context.Context, traceID []string) (int64, int64, error)
// Query Progress tracking helpers.
ReportQueryStartForProgressTracking(queryId string) (reportQueryFinished func(), err *model.ApiError)
SubscribeToQueryProgress(queryId string) (<-chan model.QueryProgress, func(), *model.ApiError)
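
The progress-tracking helpers at the bottom of the interface follow a start/subscribe/cleanup shape. A hedged consumer sketch against simplified local copies of the signatures (plain error replaces *model.ApiError, and QueryProgress stands in for model.QueryProgress):

package main

import "fmt"

// QueryProgress is a simplified stand-in for model.QueryProgress.
type QueryProgress struct{ ReadRows uint64 }

// ProgressReader is a trimmed copy of the two progress-tracking methods
// from the Reader interface above.
type ProgressReader interface {
	ReportQueryStartForProgressTracking(queryID string) (func(), error)
	SubscribeToQueryProgress(queryID string) (<-chan QueryProgress, func(), error)
}

// trackQuery shows the intended call order: report start, subscribe, defer
// both cleanup funcs, then drain progress updates until the channel closes.
func trackQuery(r ProgressReader, queryID string) error {
	finish, err := r.ReportQueryStartForProgressTracking(queryID)
	if err != nil {
		return err
	}
	defer finish()

	updates, unsubscribe, err := r.SubscribeToQueryProgress(queryID)
	if err != nil {
		return err
	}
	defer unsubscribe()

	for p := range updates {
		fmt.Printf("query %s: %d rows read\n", queryID, p.ReadRows)
	}
	return nil
}

// mockReader emits one progress update and closes the channel.
type mockReader struct{}

func (mockReader) ReportQueryStartForProgressTracking(string) (func(), error) {
	return func() {}, nil
}

func (mockReader) SubscribeToQueryProgress(string) (<-chan QueryProgress, func(), error) {
	ch := make(chan QueryProgress, 1)
	ch <- QueryProgress{ReadRows: 42}
	close(ch)
	return ch, func() {}, nil
}

func main() {
	_ = trackQuery(mockReader{}, "query-1")
}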

View File

@@ -45,9 +45,7 @@ func main() {
var maxOpenConns int
var dialTimeout time.Duration
// Deprecated
flag.BoolVar(&useLogsNewSchema, "use-logs-new-schema", false, "use logs_v2 schema for logs")
// Deprecated
flag.BoolVar(&useTraceNewSchema, "use-trace-new-schema", false, "use new schema for traces")
// Deprecated
flag.StringVar(&promConfigPath, "config", "./config/prometheus.yml", "(prometheus config to read metrics)")
@@ -128,6 +126,8 @@ func main() {
FluxInterval: fluxInterval,
FluxIntervalForTraceDetail: fluxIntervalForTraceDetail,
Cluster: cluster,
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
SigNoz: signoz,
Jwt: jwt,
}
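
For reference, the schema switches are ordinary stdlib flags. A minimal sketch of how they are defined and parsed; the names, defaults, and usage strings are copied from the hunk above, everything else is illustrative:

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Same names, defaults, and usage strings as the flag definitions above.
	useLogsNewSchema := flag.Bool("use-logs-new-schema", false, "use logs_v2 schema for logs")
	useTraceNewSchema := flag.Bool("use-trace-new-schema", false, "use new schema for traces")
	flag.Parse()

	// In the real main these feed ServerOptions; here we just print them.
	fmt.Println(*useLogsNewSchema, *useTraceNewSchema)
}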

View File

@@ -70,6 +70,16 @@ type RegisterEventParams struct {
RateLimited bool `json:"rateLimited"`
}
type GetUsageParams struct {
StartTime string
EndTime string
ServiceName string
Period string
StepHour int
Start *time.Time
End *time.Time
}
type GetServicesParams struct {
StartTime string `json:"start"`
EndTime string `json:"end"`

View File

@@ -34,29 +34,33 @@ import (
)
type PrepareTaskOptions struct {
Rule *ruletypes.PostableRule
TaskName string
RuleStore ruletypes.RuleStore
MaintenanceStore ruletypes.MaintenanceStore
Logger *zap.Logger
Reader interfaces.Reader
Cache cache.Cache
ManagerOpts *ManagerOptions
NotifyFunc NotifyFunc
SQLStore sqlstore.SQLStore
OrgID string
Rule *ruletypes.PostableRule
TaskName string
RuleStore ruletypes.RuleStore
MaintenanceStore ruletypes.MaintenanceStore
Logger *zap.Logger
Reader interfaces.Reader
Cache cache.Cache
ManagerOpts *ManagerOptions
NotifyFunc NotifyFunc
SQLStore sqlstore.SQLStore
UseLogsNewSchema bool
UseTraceNewSchema bool
OrgID string
}
type PrepareTestRuleOptions struct {
Rule *ruletypes.PostableRule
RuleStore ruletypes.RuleStore
MaintenanceStore ruletypes.MaintenanceStore
Logger *zap.Logger
Reader interfaces.Reader
Cache cache.Cache
ManagerOpts *ManagerOptions
NotifyFunc NotifyFunc
SQLStore sqlstore.SQLStore
Rule *ruletypes.PostableRule
RuleStore ruletypes.RuleStore
MaintenanceStore ruletypes.MaintenanceStore
Logger *zap.Logger
Reader interfaces.Reader
Cache cache.Cache
ManagerOpts *ManagerOptions
NotifyFunc NotifyFunc
SQLStore sqlstore.SQLStore
UseLogsNewSchema bool
UseTraceNewSchema bool
}
const taskNamesuffix = "webAppEditor"
@@ -91,7 +95,10 @@ type ManagerOptions struct {
EvalDelay time.Duration
PrepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
PrepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
UseLogsNewSchema bool
UseTraceNewSchema bool
PrepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError)
Alertmanager alertmanager.Alertmanager
SQLStore sqlstore.SQLStore
@@ -114,6 +121,9 @@ type Manager struct {
prepareTaskFunc func(opts PrepareTaskOptions) (Task, error)
prepareTestRuleFunc func(opts PrepareTestRuleOptions) (int, *model.ApiError)
UseLogsNewSchema bool
UseTraceNewSchema bool
alertmanager alertmanager.Alertmanager
sqlstore sqlstore.SQLStore
}
@@ -146,6 +156,8 @@ func defaultPrepareTaskFunc(opts PrepareTaskOptions) (Task, error) {
ruleId,
opts.Rule,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
WithEvalDelay(opts.ManagerOpts.EvalDelay),
WithSQLStore(opts.SQLStore),
)
@@ -395,17 +407,19 @@ func (m *Manager) editTask(_ context.Context, orgID string, rule *ruletypes.Post
zap.L().Debug("editing a rule task", zap.String("name", taskName))
newTask, err := m.prepareTaskFunc(PrepareTaskOptions{
Rule: rule,
TaskName: taskName,
RuleStore: m.ruleStore,
MaintenanceStore: m.maintenanceStore,
Logger: m.logger,
Reader: m.reader,
Cache: m.cache,
ManagerOpts: m.opts,
NotifyFunc: m.prepareNotifyFunc(),
SQLStore: m.sqlstore,
OrgID: orgID,
Rule: rule,
TaskName: taskName,
RuleStore: m.ruleStore,
MaintenanceStore: m.maintenanceStore,
Logger: m.logger,
Reader: m.reader,
Cache: m.cache,
ManagerOpts: m.opts,
NotifyFunc: m.prepareNotifyFunc(),
SQLStore: m.sqlstore,
UseLogsNewSchema: m.opts.UseLogsNewSchema,
UseTraceNewSchema: m.opts.UseTraceNewSchema,
OrgID: orgID,
})
if err != nil {
@@ -581,17 +595,19 @@ func (m *Manager) addTask(_ context.Context, orgID string, rule *ruletypes.Posta
zap.L().Debug("adding a new rule task", zap.String("name", taskName))
newTask, err := m.prepareTaskFunc(PrepareTaskOptions{
Rule: rule,
TaskName: taskName,
RuleStore: m.ruleStore,
MaintenanceStore: m.maintenanceStore,
Logger: m.logger,
Reader: m.reader,
Cache: m.cache,
ManagerOpts: m.opts,
NotifyFunc: m.prepareNotifyFunc(),
SQLStore: m.sqlstore,
OrgID: orgID,
Rule: rule,
TaskName: taskName,
RuleStore: m.ruleStore,
MaintenanceStore: m.maintenanceStore,
Logger: m.logger,
Reader: m.reader,
Cache: m.cache,
ManagerOpts: m.opts,
NotifyFunc: m.prepareNotifyFunc(),
SQLStore: m.sqlstore,
UseLogsNewSchema: m.opts.UseLogsNewSchema,
UseTraceNewSchema: m.opts.UseTraceNewSchema,
OrgID: orgID,
})
if err != nil {
@@ -971,15 +987,17 @@ func (m *Manager) TestNotification(ctx context.Context, ruleStr string) (int, *m
}
alertCount, apiErr := m.prepareTestRuleFunc(PrepareTestRuleOptions{
Rule: parsedRule,
RuleStore: m.ruleStore,
MaintenanceStore: m.maintenanceStore,
Logger: m.logger,
Reader: m.reader,
Cache: m.cache,
ManagerOpts: m.opts,
NotifyFunc: m.prepareTestNotifyFunc(),
SQLStore: m.sqlstore,
Rule: parsedRule,
RuleStore: m.ruleStore,
MaintenanceStore: m.maintenanceStore,
Logger: m.logger,
Reader: m.reader,
Cache: m.cache,
ManagerOpts: m.opts,
NotifyFunc: m.prepareTestNotifyFunc(),
SQLStore: m.sqlstore,
UseLogsNewSchema: m.opts.UseLogsNewSchema,
UseTraceNewSchema: m.opts.UseTraceNewSchema,
})
return alertCount, apiErr

View File

@@ -15,6 +15,7 @@ import (
// TestNotification prepares a dummy rule for the given rule parameters and
// sends a test notification. Returns the alert count and an error (if any).
func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError) {
ctx := context.Background()
if opts.Rule == nil {
@@ -47,6 +48,8 @@ func defaultTestNotification(opts PrepareTestRuleOptions) (int, *model.ApiError)
alertname,
parsedRule,
opts.Reader,
opts.UseLogsNewSchema,
opts.UseTraceNewSchema,
WithSendAlways(),
WithSendUnmatched(),
WithSQLStore(opts.SQLStore),

View File

@@ -29,6 +29,7 @@ import (
"github.com/SigNoz/signoz/pkg/query-service/utils/timestamp"
logsv3 "github.com/SigNoz/signoz/pkg/query-service/app/logs/v3"
tracesV3 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v3"
tracesV4 "github.com/SigNoz/signoz/pkg/query-service/app/traces/v4"
"github.com/SigNoz/signoz/pkg/query-service/formatter"
@@ -51,12 +52,16 @@ type ThresholdRule struct {
// used for attribute metadata enrichment for logs and traces
logsKeys map[string]v3.AttributeKey
spansKeys map[string]v3.AttributeKey
useTraceNewSchema bool
}
func NewThresholdRule(
id string,
p *ruletypes.PostableRule,
reader interfaces.Reader,
useLogsNewSchema bool,
useTraceNewSchema bool,
opts ...RuleOption,
) (*ThresholdRule, error) {
@@ -68,20 +73,25 @@ func NewThresholdRule(
}
t := ThresholdRule{
BaseRule: baseRule,
version: p.Version,
BaseRule: baseRule,
version: p.Version,
useTraceNewSchema: useTraceNewSchema,
}
querierOption := querier.QuerierOptions{
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
}
querierOptsV2 := querierV2.QuerierOptions{
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
Reader: reader,
Cache: nil,
KeyGenerator: queryBuilder.NewKeyGenerator(),
UseLogsNewSchema: useLogsNewSchema,
UseTraceNewSchema: useTraceNewSchema,
}
t.querier = querier.NewQuerier(querierOption)
@@ -291,7 +301,11 @@ func (r *ThresholdRule) buildAndRunQuery(ctx context.Context, ts time.Time) (rul
return nil, err
}
r.spansKeys = spanKeys
tracesV4.Enrich(params, spanKeys)
if r.useTraceNewSchema {
tracesV4.Enrich(params, spanKeys)
} else {
tracesV3.Enrich(params, spanKeys)
}
}
}
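
The enrichment branch follows the same toggle: span keys are computed once, then routed to the schema-appropriate Enrich. A hedged sketch of the dispatch, with enrichV3/enrichV4 standing in for tracesV3.Enrich/tracesV4.Enrich and simplified param types:

package main

import "fmt"

// Params and AttributeKey are simplified stand-ins for the v3 query types.
type Params struct{ Enriched string }
type AttributeKey struct{ Key string }

// Stand-ins for tracesV3.Enrich and tracesV4.Enrich, which annotate the
// query params in place using span attribute metadata.
func enrichV3(p *Params, keys map[string]AttributeKey) { p.Enriched = "v3" }
func enrichV4(p *Params, keys map[string]AttributeKey) { p.Enriched = "v4" }

// enrichSpans mirrors buildAndRunQuery: one flag check decides which
// schema's enrichment runs; everything upstream is shared.
func enrichSpans(useTraceNewSchema bool, p *Params, keys map[string]AttributeKey) {
	if useTraceNewSchema {
		enrichV4(p, keys)
	} else {
		enrichV3(p, keys)
	}
}

func main() {
	p := &Params{}
	enrichSpans(true, p, map[string]AttributeKey{"service.name": {Key: "service.name"}})
	fmt.Println(p.Enriched) // v4
}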

View File

@@ -801,7 +801,7 @@ func TestThresholdRuleShouldAlert(t *testing.T) {
postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
postableRule.RuleCondition.Target = &c.target
rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, nil, true, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -889,7 +889,7 @@ func TestPrepareLinksToLogs(t *testing.T) {
},
}
rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, nil, true, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -930,7 +930,7 @@ func TestPrepareLinksToTraces(t *testing.T) {
},
}
rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, nil, true, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -1005,7 +1005,7 @@ func TestThresholdRuleLabelNormalization(t *testing.T) {
postableRule.RuleCondition.MatchType = ruletypes.MatchType(c.matchType)
postableRule.RuleCondition.Target = &c.target
rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, nil, true, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -1057,7 +1057,7 @@ func TestThresholdRuleEvalDelay(t *testing.T) {
}
for idx, c := range cases {
rule, err := NewThresholdRule("69", &postableRule, nil) // no eval delay
rule, err := NewThresholdRule("69", &postableRule, nil, true, true) // no eval delay
if err != nil {
assert.NoError(t, err)
}
@@ -1105,7 +1105,7 @@ func TestThresholdRuleClickHouseTmpl(t *testing.T) {
}
for idx, c := range cases {
rule, err := NewThresholdRule("69", &postableRule, nil, WithEvalDelay(2*time.Minute))
rule, err := NewThresholdRule("69", &postableRule, nil, true, true, WithEvalDelay(2*time.Minute))
if err != nil {
assert.NoError(t, err)
}
@@ -1244,8 +1244,8 @@ func TestThresholdRuleUnitCombinations(t *testing.T) {
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
readerCache, err := memorycache.New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
require.NoError(t, err)
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), readerCache)
rule, err := NewThresholdRule("69", &postableRule, reader)
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", true, true, time.Duration(time.Second), readerCache)
rule, err := NewThresholdRule("69", &postableRule, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
@@ -1340,9 +1340,9 @@ func TestThresholdRuleNoData(t *testing.T) {
}
readerCache, err := memorycache.New(context.Background(), factorytest.NewSettings(), cache.Config{Provider: "memory", Memory: cache.Memory{TTL: DefaultFrequency}})
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), readerCache)
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", true, true, time.Duration(time.Second), readerCache)
rule, err := NewThresholdRule("69", &postableRule, reader)
rule, err := NewThresholdRule("69", &postableRule, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
@@ -1444,9 +1444,9 @@ func TestThresholdRuleTracesLink(t *testing.T) {
}
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), nil)
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", true, true, time.Duration(time.Second), nil)
rule, err := NewThresholdRule("69", &postableRule, reader)
rule, err := NewThresholdRule("69", &postableRule, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
@@ -1565,9 +1565,9 @@ func TestThresholdRuleLogsLink(t *testing.T) {
}
options := clickhouseReader.NewOptions("", "", "archiveNamespace")
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", time.Duration(time.Second), nil)
reader := clickhouseReader.NewReaderFromClickhouseConnection(options, nil, telemetryStore, prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}), "", true, true, time.Duration(time.Second), nil)
rule, err := NewThresholdRule("69", &postableRule, reader)
rule, err := NewThresholdRule("69", &postableRule, reader, true, true)
rule.TemporalityMap = map[string]map[v3.Temporality]bool{
"signoz_calls_total": {
v3.Delta: true,
@@ -1643,7 +1643,7 @@ func TestThresholdRuleShiftBy(t *testing.T) {
},
}
rule, err := NewThresholdRule("69", &postableRule, nil)
rule, err := NewThresholdRule("69", &postableRule, nil, true, true)
if err != nil {
assert.NoError(t, err)
}

View File

@@ -46,6 +46,8 @@ func NewMockClickhouseReader(t *testing.T, testDB sqlstore.SQLStore) (*clickhous
telemetryStore,
prometheustest.New(instrumentationtest.New().Logger(), prometheus.Config{}),
"",
true,
true,
time.Duration(time.Second),
nil,
)