Compare commits

..

17 Commits

Author SHA1 Message Date
Rajat-Dabade
ced74603c0 chore: updated test script 2023-12-15 13:46:16 +05:30
Rajat-Dabade
f59fb81109 refactor: updated test directory 2023-12-15 13:46:16 +05:30
Rajat-Dabade
507e68a0c1 refactor: reverted back as working directory is specified as frontend 2023-12-15 13:46:16 +05:30
Rajat-Dabade
4ad8a1f3ad refactor: shifted i18n to original location 2023-12-15 13:46:16 +05:30
Rajat-Dabade
19faf6a584 chore: updates 2023-12-15 13:46:16 +05:30
Rajat-Dabade
3978ada811 refactor: for push and pull request 2023-12-15 13:46:16 +05:30
Rajat-Dabade
0a04fc04a5 refactor: generate code coverage report on every push 2023-12-15 13:46:16 +05:30
Rajat-Dabade
7c9e333b84 refactor: added package-manager 2023-12-15 13:46:16 +05:30
Rajat-Dabade
dd78afb20f refactor: updated the working directory 2023-12-15 13:46:16 +05:30
Rajat-Dabade
237d765376 refactor: updated github flow 2023-12-15 13:46:16 +05:30
Rajat-Dabade
85e865fb1b refactor: updated token 2023-12-15 13:46:16 +05:30
Rajat-Dabade
975e5daf03 refactor: updated test case 2023-12-15 13:46:16 +05:30
Rajat-Dabade
8a532cca17 refactor: updated jest running command 2023-12-15 13:46:16 +05:30
Rajat-Dabade
b9c908719f refactor: updated the command for jest 2023-12-15 13:46:16 +05:30
Rajat-Dabade
63c7b5e9e1 chore: minor changes 2023-12-15 13:46:16 +05:30
Rajat-Dabade
32eeb3d106 refactor: done some changes 2023-12-15 13:46:16 +05:30
Rajat-Dabade
1a4ec2bf00 feat: jest code coverage report 2023-12-15 13:46:16 +05:30
150 changed files with 560 additions and 4201 deletions

View File

@@ -0,0 +1,32 @@
name: Code Coverage
on:
  push:
    branches:
      - develop
      - main
      - release/v*
  pull_request:
    branches:
      - develop
      - main
      - release/v*
jobs:
  coverage:
    runs-on: ubuntu-latest
    permissions:
      checks: write
      pull-requests: write
      contents: write
    steps:
      - name: Checkout Repository
        uses: actions/checkout@v2
      - uses: jwalton/gh-find-current-pr@v1
        id: findPr
      - uses: ArtiomTr/jest-coverage-report-action@v2
        with:
          package-manager: yarn
          working-directory: frontend
          test-script: yarn jest:coverage
          github-token: ${{ secrets.GITHUB_TOKEN }}
          output: comment
          prnumber: ${{ steps.findPr.outputs.number }}
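The workflow assumes the frontend package defines a `jest:coverage` script, since the action is invoked with `package-manager: yarn`, `working-directory: frontend`, and `test-script: yarn jest:coverage`. A minimal sketch of the Jest coverage settings such a script might enable; the file name `jest.config.ts` and the option values are illustrative assumptions, not taken from this diff:

```typescript
// Hypothetical frontend/jest.config.ts: a sketch of coverage settings that a
// `jest:coverage` script could enable. Values are illustrative assumptions.
import type { Config } from 'jest';

const config: Config = {
  collectCoverage: true,
  coverageDirectory: 'coverage',
  // Reporters commonly consumed by coverage-report actions (assumed here).
  coverageReporters: ['text', 'json-summary', 'lcov'],
};

export default config;
```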

View File

@@ -199,13 +199,10 @@ Not sure how to get started? Just ping us on `#contributing` in our [slack commu
#### Frontend
- [Palash Gupta](https://github.com/palashgdev)
- [Yunus M](https://github.com/YounixM)
- [Rajat Dabade](https://github.com/Rajat-Dabade)
#### DevOps
- [Prashant Shahi](https://github.com/prashant-shahi)
- [Dhawal Sanghvi](https://github.com/dhawal1248)
<br /><br />

View File

@@ -146,7 +146,7 @@ services:
condition: on-failure
query-service:
image: signoz/query-service:0.36.0
image: signoz/query-service:0.35.1
command:
[
"-config=/root/config/prometheus.yml",
@@ -186,7 +186,7 @@ services:
<<: *db-depend
frontend:
image: signoz/frontend:0.36.0
image: signoz/frontend:0.35.1
deploy:
restart_policy:
condition: on-failure
@@ -199,7 +199,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector:
image: signoz/signoz-otel-collector:0.88.4
image: signoz/signoz-otel-collector:0.88.3
command:
[
"--config=/etc/otel-collector-config.yaml",
@@ -237,7 +237,7 @@ services:
- query-service
otel-collector-migrator:
image: signoz/signoz-schema-migrator:0.88.4
image: signoz/signoz-schema-migrator:0.88.3
deploy:
restart_policy:
condition: on-failure
@@ -250,7 +250,7 @@ services:
# - clickhouse-3
otel-collector-metrics:
image: signoz/signoz-otel-collector:0.88.4
image: signoz/signoz-otel-collector:0.88.3
command:
[
"--config=/etc/otel-collector-metrics-config.yaml",

View File

@@ -66,7 +66,7 @@ services:
- --storage.path=/data
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.4}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.3}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
@@ -81,7 +81,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
otel-collector:
container_name: signoz-otel-collector
image: signoz/signoz-otel-collector:0.88.4
image: signoz/signoz-otel-collector:0.88.3
command:
[
"--config=/etc/otel-collector-config.yaml",
@@ -118,7 +118,7 @@ services:
otel-collector-metrics:
container_name: signoz-otel-collector-metrics
image: signoz/signoz-otel-collector:0.88.4
image: signoz/signoz-otel-collector:0.88.3
command:
[
"--config=/etc/otel-collector-metrics-config.yaml",

View File

@@ -164,7 +164,7 @@ services:
# Notes for Maintainers/Contributors who will change Line Numbers of Frontend & Query-Section. Please Update Line Numbers in `./scripts/commentLinesForSetup.sh` & `./CONTRIBUTING.md`
query-service:
image: signoz/query-service:${DOCKER_TAG:-0.36.0}
image: signoz/query-service:${DOCKER_TAG:-0.35.1}
container_name: signoz-query-service
command:
[
@@ -203,7 +203,7 @@ services:
<<: *db-depend
frontend:
image: signoz/frontend:${DOCKER_TAG:-0.36.0}
image: signoz/frontend:${DOCKER_TAG:-0.35.1}
container_name: signoz-frontend
restart: on-failure
depends_on:
@@ -215,7 +215,7 @@ services:
- ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf
otel-collector-migrator:
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.4}
image: signoz/signoz-schema-migrator:${OTELCOL_TAG:-0.88.3}
container_name: otel-migrator
command:
- "--dsn=tcp://clickhouse:9000"
@@ -229,7 +229,7 @@ services:
otel-collector:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.4}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.3}
container_name: signoz-otel-collector
command:
[
@@ -269,7 +269,7 @@ services:
condition: service_healthy
otel-collector-metrics:
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.4}
image: signoz/signoz-otel-collector:${OTELCOL_TAG:-0.88.3}
container_name: signoz-otel-collector-metrics
command:
[

View File

@@ -18,7 +18,6 @@ COPY ee/query-service/bin/query-service-${TARGETOS}-${TARGETARCH} /root/query-se
# copy prometheus YAML config
COPY pkg/query-service/config/prometheus.yml /root/config/prometheus.yml
COPY pkg/query-service/templates /root/templates
# Make query-service executable for non-root users
RUN chmod 755 /root /root/query-service

Binary file not shown (removed image, 51 KiB)

Binary file not shown (removed image, 3.7 KiB)

Binary file not shown (removed image, 957 B)

Binary file not shown (removed image, 4.3 KiB)

Binary file not shown (removed image, 45 KiB)

View File

@@ -14,6 +14,5 @@
"delete_domain_message": "Are you sure you want to delete this domain?",
"delete_domain": "Delete Domain",
"add_domain": "Add Domains",
"saml_settings": "Your SAML settings have been saved, please login from incognito window to confirm that it has been set up correctly",
"invite_link_share_manually": "After inviting members, please copy the invite link and send them the link manually"
"saml_settings":"Your SAML settings have been saved, please login from incognito window to confirm that it has been set up correctly"
}

View File

@@ -1,3 +0,0 @@
{
"rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support."
}

View File

@@ -14,6 +14,5 @@
"delete_domain_message": "Are you sure you want to delete this domain?",
"delete_domain": "Delete Domain",
"add_domain": "Add Domains",
"saml_settings": "Your SAML settings have been saved, please login from incognito window to confirm that it has been set up correctly",
"invite_link_share_manually": "After inviting members, please copy the invite link and send them the link manually"
"saml_settings":"Your SAML settings have been saved, please login from incognito window to confirm that it has been set up correctly"
}

View File

@@ -1,3 +0,0 @@
{
"rps_over_100": "You are sending data at more than 100 RPS, your ingestion may be rate limited. Please reach out to us via Intercom support."
}

View File

@@ -1,10 +1,9 @@
import cacheBursting from 'i18n-translations-hash.json';
import i18n from 'i18next';
import LanguageDetector from 'i18next-browser-languagedetector';
import Backend from 'i18next-http-backend';
import { initReactI18next } from 'react-i18next';
import cacheBursting from '../../i18n-translations-hash.json';
i18n
// load translation using http -> see /public/locales
.use(Backend)

View File

@@ -1,15 +0,0 @@
import { ApiV3Instance as axios } from 'api';
import { ApiResponse } from 'types/api';
import { ICompositeMetricQuery } from 'types/api/alerts/compositeQuery';
import { QueryRangePayload } from 'types/api/metrics/getQueryRange';
interface IQueryRangeFormat {
compositeQuery: ICompositeMetricQuery;
}
export const getQueryRangeFormat = (
props?: Partial<QueryRangePayload>,
): Promise<IQueryRangeFormat> =>
axios
.post<ApiResponse<IQueryRangeFormat>>('/query_range/format', props)
.then((res) => res.data.data);

View File

@@ -27,7 +27,6 @@ function DynamicColumnTable({
);
useEffect(() => {
setColumnsData(columns);
const visibleColumns = getVisibleColumns({
tablesource,
columnsData: columns,
@@ -43,7 +42,7 @@ function DynamicColumnTable({
: undefined,
);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [columns]);
}, []);
const onToggleHandler = (index: number) => (
checked: boolean,

View File

@@ -1,2 +0,0 @@
const MAX_RPS_LIMIT = 100;
export { MAX_RPS_LIMIT };

View File

@@ -1,16 +0,0 @@
.span-container {
.spanDetails {
position: absolute;
height: 50px;
padding: 8px;
min-width: 150px;
background: lightcyan;
color: black;
bottom: 24px;
left: 0;
display: flex;
justify-content: center;
align-items: center;
}
}

View File

@@ -1,96 +0,0 @@
import '../GantChart.styles.scss';
import { Popover, Typography } from 'antd';
import { convertTimeToRelevantUnit } from 'container/TraceDetail/utils';
import dayjs from 'dayjs';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { useEffect } from 'react';
import { toFixed } from 'utils/toFixed';
import { SpanBorder, SpanLine, SpanText, SpanWrapper } from './styles';
interface SpanLengthProps {
globalStart: number;
startTime: number;
name: string;
width: string;
leftOffset: string;
bgColor: string;
inMsCount: number;
}
function Span(props: SpanLengthProps): JSX.Element {
const {
width,
leftOffset,
bgColor,
inMsCount,
startTime,
name,
globalStart,
} = props;
const isDarkMode = useIsDarkMode();
const { time, timeUnitName } = convertTimeToRelevantUnit(inMsCount);
useEffect(() => {
document.documentElement.scrollTop = document.documentElement.clientHeight;
document.documentElement.scrollLeft = document.documentElement.clientWidth;
}, []);
const getContent = (): JSX.Element => {
const timeStamp = dayjs(startTime).format('h:mm:ss:SSS A');
const startTimeInMs = startTime - globalStart;
return (
<div>
<Typography.Text style={{ marginBottom: '8px' }}>
{' '}
Duration : {inMsCount}
</Typography.Text>
<br />
<Typography.Text style={{ marginBottom: '8px' }}>
Start Time: {startTimeInMs}ms [{timeStamp}]{' '}
</Typography.Text>
</div>
);
};
return (
<SpanWrapper className="span-container">
<SpanLine
className="spanLine"
isDarkMode={isDarkMode}
bgColor={bgColor}
leftOffset={leftOffset}
width={width}
/>
<div>
<Popover
style={{
left: `${leftOffset}%`,
}}
title={name}
content={getContent()}
trigger="hover"
placement="left"
autoAdjustOverflow
>
<SpanBorder
className="spanTrack"
isDarkMode={isDarkMode}
bgColor={bgColor}
leftOffset={leftOffset}
width={width}
/>
</Popover>
</div>
<SpanText isDarkMode={isDarkMode} leftOffset={leftOffset}>{`${toFixed(
time,
2,
)} ${timeUnitName}`}</SpanText>
</SpanWrapper>
);
}
export default Span;

View File

@@ -0,0 +1,40 @@
import { convertTimeToRelevantUnit } from 'container/TraceDetail/utils';
import { useIsDarkMode } from 'hooks/useDarkMode';
import { toFixed } from 'utils/toFixed';
import { SpanBorder, SpanLine, SpanText, SpanWrapper } from './styles';
interface SpanLengthProps {
width: string;
leftOffset: string;
bgColor: string;
inMsCount: number;
}
function SpanLength(props: SpanLengthProps): JSX.Element {
const { width, leftOffset, bgColor, inMsCount } = props;
const isDarkMode = useIsDarkMode();
const { time, timeUnitName } = convertTimeToRelevantUnit(inMsCount);
return (
<SpanWrapper>
<SpanLine
isDarkMode={isDarkMode}
bgColor={bgColor}
leftOffset={leftOffset}
width={width}
/>
<SpanBorder
isDarkMode={isDarkMode}
bgColor={bgColor}
leftOffset={leftOffset}
width={width}
/>
<SpanText isDarkMode={isDarkMode} leftOffset={leftOffset}>{`${toFixed(
time,
2,
)} ${timeUnitName}`}</SpanText>
</SpanWrapper>
);
}
export default SpanLength;

View File

@@ -16,7 +16,7 @@ import {
import { ITraceTree } from 'types/api/trace/getTraceItem';
import { ITraceMetaData } from '..';
import Span from '../Span';
import SpanLength from '../SpanLength';
import SpanName from '../SpanName';
import { getMetaDataFromSpanTree, getTopLeftFromBody } from '../utils';
import {
@@ -169,10 +169,7 @@ function Trace(props: TraceProps): JSX.Element {
</StyledRow>
</StyledCol>
<Col flex="1">
<Span
globalStart={globalStart}
startTime={startTime}
name={name}
<SpanLength
leftOffset={nodeLeftOffset.toString()}
width={width.toString()}
bgColor={serviceColour}

View File

@@ -41,11 +41,7 @@ function GanttChart(props: GanttChartProps): JSX.Element {
onClick={handleCollapse}
title={isExpandAll ? 'Collapse All' : 'Expand All'}
>
{isExpandAll ? (
<MinusSquareOutlined style={{ fontSize: '16px', color: '#08c' }} />
) : (
<PlusSquareOutlined style={{ fontSize: '16px', color: '#08c' }} />
)}
{isExpandAll ? <MinusSquareOutlined /> : <PlusSquareOutlined />}
</CollapseButton>
<CardWrapper>
<Trace

View File

@@ -11,7 +11,6 @@ export const Wrapper = styled.ul`
border-left: 1px solid #434343;
padding-left: 1rem;
width: 100%;
margin: 0px;
}
ul li {
@@ -45,4 +44,6 @@ export const CardContainer = styled.li`
export const CollapseButton = styled.div`
position: absolute;
top: 0;
left: 0;
font-size: 1.2rem;
`;

View File

@@ -14,7 +14,7 @@ import { Dropdown, MenuProps, Tooltip, Typography } from 'antd';
import Spinner from 'components/Spinner';
import { QueryParams } from 'constants/query';
import { PANEL_TYPES } from 'constants/queryBuilder';
import useCreateAlerts from 'hooks/queryBuilder/useCreateAlerts';
import ROUTES from 'constants/routes';
import useComponentPermission from 'hooks/useComponentPermission';
import history from 'lib/history';
import { ReactNode, useCallback, useMemo } from 'react';
@@ -71,7 +71,13 @@ function WidgetHeader({
);
}, [widget.id, widget.panelTypes, widget.query]);
const onCreateAlertsHandler = useCreateAlerts(widget);
const onCreateAlertsHandler = useCallback(() => {
history.push(
`${ROUTES.ALERTS_NEW}?${QueryParams.compositeQuery}=${encodeURIComponent(
JSON.stringify(widget.query),
)}`,
);
}, [widget]);
const keyMethodMapping = useMemo(
() => ({

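The swap above is between an imported `useCreateAlerts(widget)` hook and an inline handler that pushes to `ROUTES.ALERTS_NEW` with the widget query serialized into the `compositeQuery` URL parameter. The hook's implementation is not part of this diff; a plausible sketch, assuming it only wraps the same navigation shown inline:

```typescript
// Hypothetical sketch of hooks/queryBuilder/useCreateAlerts. Assumes the hook
// merely wraps the ROUTES.ALERTS_NEW navigation that appears inline in this diff.
import { QueryParams } from 'constants/query';
import ROUTES from 'constants/routes';
import history from 'lib/history';
import { useCallback } from 'react';
import { Widgets } from 'types/api/dashboard/getAll';

const useCreateAlerts = (widget?: Widgets): (() => void) =>
  useCallback(() => {
    if (!widget) return;
    history.push(
      `${ROUTES.ALERTS_NEW}?${QueryParams.compositeQuery}=${encodeURIComponent(
        JSON.stringify(widget.query),
      )}`,
    );
  }, [widget]);

export default useCreateAlerts;
```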
View File

@@ -1,7 +1,7 @@
/* eslint-disable react/display-name */
import { PlusOutlined } from '@ant-design/icons';
import { Input, Typography } from 'antd';
import type { ColumnsType } from 'antd/es/table/interface';
import { Typography } from 'antd';
import { ColumnsType } from 'antd/lib/table';
import saveAlertApi from 'api/alerts/save';
import DropDown from 'components/DropDown/DropDown';
import {
@@ -14,12 +14,9 @@ import LabelColumn from 'components/TableRenderer/LabelColumn';
import TextToolTip from 'components/TextToolTip';
import { QueryParams } from 'constants/query';
import ROUTES from 'constants/routes';
import useSortableTable from 'hooks/ResizeTable/useSortableTable';
import useComponentPermission from 'hooks/useComponentPermission';
import useDebouncedFn from 'hooks/useDebouncedFunction';
import useInterval from 'hooks/useInterval';
import { useNotifications } from 'hooks/useNotifications';
import useUrlQuery from 'hooks/useUrlQuery';
import history from 'lib/history';
import { mapQueryDataFromApi } from 'lib/newQueryBuilder/queryBuilderMappers/mapQueryDataFromApi';
import { useCallback, useState } from 'react';
@@ -32,19 +29,12 @@ import { GettableAlert } from 'types/api/alerts/get';
import AppReducer from 'types/reducer/app';
import DeleteAlert from './DeleteAlert';
import {
Button,
ButtonContainer,
ColumnButton,
SearchContainer,
} from './styles';
import { Button, ButtonContainer, ColumnButton } from './styles';
import Status from './TableComponents/Status';
import ToggleAlertState from './ToggleAlertState';
import { filterAlerts } from './utils';
const { Search } = Input;
function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
const [data, setData] = useState<GettableAlert[]>(allAlertRules || []);
const { t } = useTranslation('common');
const { role, featureResponse } = useSelector<AppState, AppReducer>(
(state) => state.app,
@@ -54,39 +44,13 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
role,
);
const params = useUrlQuery();
const orderColumnParam = params.get('columnKey');
const orderQueryParam = params.get('order');
const paginationParam = params.get('page');
const searchParams = params.get('search');
const [searchString, setSearchString] = useState<string>(searchParams || '');
const [data, setData] = useState<GettableAlert[]>(() => {
const value = searchString.toLowerCase();
const filteredData = filterAlerts(allAlertRules, value);
return filteredData || [];
});
// Type assuring
const sortingOrder: 'ascend' | 'descend' | null =
orderQueryParam === 'ascend' || orderQueryParam === 'descend'
? orderQueryParam
: null;
const { sortedInfo, handleChange } = useSortableTable<GettableAlert>(
sortingOrder,
orderColumnParam || '',
searchString,
);
const { notifications: notificationsApi } = useNotifications();
useInterval(() => {
(async (): Promise<void> => {
const { data: refetchData, status } = await refetch();
if (status === 'success') {
const value = searchString.toLowerCase();
const filteredData = filterAlerts(refetchData.payload || [], value);
setData(filteredData || []);
setData(refetchData?.payload || []);
}
if (status === 'error') {
notificationsApi.error({
@@ -164,13 +128,6 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
}
};
const handleSearch = useDebouncedFn((e: unknown) => {
const value = (e as React.BaseSyntheticEvent).target.value.toLowerCase();
setSearchString(value);
const filteredData = filterAlerts(allAlertRules, value);
setData(filteredData);
});
const dynamicColumns: ColumnsType<GettableAlert> = [
{
title: 'Created At',
@@ -185,10 +142,6 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
return prev - next;
},
render: DateComponent,
sortOrder:
sortedInfo.columnKey === DynamicColumnsKey.CreatedAt
? sortedInfo.order
: null,
},
{
title: 'Created By',
@@ -210,10 +163,6 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
return prev - next;
},
render: DateComponent,
sortOrder:
sortedInfo.columnKey === DynamicColumnsKey.UpdatedAt
? sortedInfo.order
: null,
},
{
title: 'Updated By',
@@ -234,7 +183,6 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
(b.state ? b.state.charCodeAt(0) : 1000) -
(a.state ? a.state.charCodeAt(0) : 1000),
render: (value): JSX.Element => <Status status={value} />,
sortOrder: sortedInfo.columnKey === 'state' ? sortedInfo.order : null,
},
{
title: 'Alert Name',
@@ -250,7 +198,6 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
render: (value, record): JSX.Element => (
<Typography.Link onClick={onEditHandler(record)}>{value}</Typography.Link>
),
sortOrder: sortedInfo.columnKey === 'name' ? sortedInfo.order : null,
},
{
title: 'Severity',
@@ -267,7 +214,6 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
return <Typography>{severityValue}</Typography>;
},
sortOrder: sortedInfo.columnKey === 'severity' ? sortedInfo.order : null,
},
{
title: 'Labels',
@@ -325,37 +271,26 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
return (
<>
<SearchContainer>
<Search
placeholder="Search by Alert Name, Severity and Labels"
onChange={handleSearch}
defaultValue={searchString}
<ButtonContainer>
<TextToolTip
{...{
text: `More details on how to create alerts`,
url: 'https://signoz.io/docs/userguide/alerts-management/',
}}
/>
<ButtonContainer>
<TextToolTip
{...{
text: `More details on how to create alerts`,
url: 'https://signoz.io/docs/userguide/alerts-management/',
}}
/>
{addNewAlert && (
<Button onClick={onClickNewAlertHandler} icon={<PlusOutlined />}>
New Alert
</Button>
)}
</ButtonContainer>
</SearchContainer>
{addNewAlert && (
<Button onClick={onClickNewAlertHandler} icon={<PlusOutlined />}>
New Alert
</Button>
)}
</ButtonContainer>
<DynamicColumnTable
tablesource={TableDataSource.Alert}
columns={columns}
rowKey="id"
dataSource={data}
dynamicColumns={dynamicColumns}
onChange={handleChange}
pagination={{
defaultCurrent: Number(paginationParam) || 1,
}}
/>
</>
);

View File

@@ -1,17 +1,11 @@
import { Button as ButtonComponent } from 'antd';
import styled from 'styled-components';
export const SearchContainer = styled.div`
&&& {
display: flex;
margin-bottom: 2rem;
align-items: center;
gap: 2rem;
}
`;
export const ButtonContainer = styled.div`
&&& {
display: flex;
justify-content: flex-end;
margin-bottom: 2rem;
align-items: center;
}
`;

View File

@@ -1,25 +0,0 @@
import { GettableAlert } from 'types/api/alerts/get';
export const filterAlerts = (
allAlertRules: GettableAlert[],
filter: string,
): GettableAlert[] => {
const value = filter.toLowerCase();
return allAlertRules.filter((alert) => {
const alertName = alert.alert.toLowerCase();
const severity = alert.labels?.severity.toLowerCase();
const labels = Object.keys(alert.labels || {})
.filter((e) => e !== 'severity')
.join(' ')
.toLowerCase();
const labelValue = Object.values(alert.labels || {});
return (
alertName.includes(value) ||
severity?.includes(value) ||
labels.includes(value) ||
labelValue.includes(value)
);
});
};

View File

@@ -22,7 +22,6 @@ import { ILog } from 'types/api/logs/log';
import ActionItem, { ActionItemProps } from './ActionItem';
import FieldRenderer from './FieldRenderer';
import {
filterKeyForField,
flattenObject,
jsonToDataNodes,
recursiveParseJSON,
@@ -99,12 +98,11 @@ function TableView({
title: 'Action',
width: 11,
render: (fieldData: Record<string, string>): JSX.Element | null => {
const fieldFilterKey = filterKeyForField(fieldData.field);
if (!RESTRICTED_FIELDS.includes(fieldFilterKey)) {
const fieldKey = fieldData.field.split('.').slice(-1);
if (!RESTRICTED_FIELDS.includes(fieldKey[0])) {
return (
<ActionItem
fieldKey={fieldFilterKey}
fieldKey={fieldKey[0]}
fieldValue={fieldData.value}
onClickActionItem={onClickActionItem}
/>
@@ -121,6 +119,7 @@ function TableView({
align: 'left',
ellipsis: true,
render: (field: string, record): JSX.Element => {
const fieldKey = field.split('.').slice(-1);
const renderedField = <FieldRenderer field={field} />;
if (record.field === 'trace_id') {
@@ -149,11 +148,10 @@ function TableView({
);
}
const fieldFilterKey = filterKeyForField(field);
if (!RESTRICTED_FIELDS.includes(fieldFilterKey)) {
if (!RESTRICTED_FIELDS.includes(fieldKey[0])) {
return (
<AddToQueryHOC
fieldKey={fieldFilterKey}
fieldKey={fieldKey[0]}
fieldValue={flattenLogData[field]}
onAddToQuery={onAddToQuery}
>

View File

@@ -132,16 +132,6 @@ export const generateFieldKeyForArray = (
export const removeObjectFromString = (str: string): string =>
str.replace(/\[object Object\]./g, '');
// Split `str` on the first occurrence of `delimiter`
// For example, will return `['a', 'b.c']` when splitting `'a.b.c'` at dots
const splitOnce = (str: string, delimiter: string): string[] => {
const parts = str.split(delimiter);
if (parts.length < 2) {
return parts;
}
return [parts[0], parts.slice(1).join(delimiter)];
};
export const getFieldAttributes = (field: string): IFieldAttributes => {
let dataType;
let newField;
@@ -150,30 +140,18 @@ export const getFieldAttributes = (field: string): IFieldAttributes => {
if (field.startsWith('attributes_')) {
logType = MetricsType.Tag;
const stringWithoutPrefix = field.slice('attributes_'.length);
const parts = splitOnce(stringWithoutPrefix, '.');
const parts = stringWithoutPrefix.split('.');
[dataType, newField] = parts;
} else if (field.startsWith('resources_')) {
logType = MetricsType.Resource;
const stringWithoutPrefix = field.slice('resources_'.length);
const parts = splitOnce(stringWithoutPrefix, '.');
const parts = stringWithoutPrefix.split('.');
[dataType, newField] = parts;
}
return { dataType, newField, logType };
};
// Returns key to be used when filtering for `field` via
// the query builder. This is useful for powering filtering
// by field values from log details view.
export const filterKeyForField = (field: string): string => {
// Must work for all 3 of the following types of cases
// timestamp -> timestamp
// attributes_string.log.file -> log.file
// resources_string.k8s.pod.name -> k8s.pod.name
const fieldAttribs = getFieldAttributes(field);
return fieldAttribs?.newField || field;
};
export const aggregateAttributesResourcesToString = (logData: ILog): string => {
const outputJson: ILogAggregateAttributesResources = {
body: logData.body,

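The hunk above removes `splitOnce` and `filterKeyForField`, whose own comments describe the intended behaviour: split only on the first occurrence of the delimiter, and strip the `attributes_*`/`resources_*` prefix plus data type so the remainder can be used as a query-builder filter key. A small standalone sketch of that documented behaviour, using the example inputs from those comments:

```typescript
// Standalone sketch of the removed helpers' documented behaviour.
const splitOnce = (str: string, delimiter: string): string[] => {
  const parts = str.split(delimiter);
  if (parts.length < 2) {
    return parts;
  }
  // Keep everything after the first delimiter intact, e.g. 'a.b.c' -> ['a', 'b.c']
  return [parts[0], parts.slice(1).join(delimiter)];
};

console.log(splitOnce('a.b.c', '.')); // ['a', 'b.c']

// filterKeyForField, per its removed comments, maps:
//   'timestamp'                     -> 'timestamp'
//   'attributes_string.log.file'    -> 'log.file'
//   'resources_string.k8s.pod.name' -> 'k8s.pod.name'
```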
View File

@@ -147,13 +147,13 @@ function LogsExplorerViews(): JSX.Element {
[currentQuery, updateAllQueriesOperators],
);
const {
data: listChartData,
isFetching: isFetchingListChartData,
isLoading: isLoadingListChartData,
} = useGetExplorerQueryRange(listChartQuery, PANEL_TYPES.TIME_SERIES, {
enabled: !!listChartQuery && panelType === PANEL_TYPES.LIST,
});
const listChartData = useGetExplorerQueryRange(
listChartQuery,
PANEL_TYPES.TIME_SERIES,
{
enabled: !!listChartQuery && panelType === PANEL_TYPES.LIST,
},
);
const { data, isFetching, isError } = useGetExplorerQueryRange(
requestData,
@@ -445,8 +445,12 @@ function LogsExplorerViews(): JSX.Element {
if (!stagedQuery) return [];
if (panelType === PANEL_TYPES.LIST) {
if (listChartData && listChartData.payload.data.result.length > 0) {
return listChartData.payload.data.result;
if (
listChartData &&
listChartData.data &&
listChartData.data.payload.data.result.length > 0
) {
return listChartData.data.payload.data.result;
}
return [];
}
@@ -468,10 +472,7 @@ function LogsExplorerViews(): JSX.Element {
return (
<>
<LogsExplorerChart
isLoading={isFetchingListChartData || isLoadingListChartData}
data={chartData}
/>
<LogsExplorerChart isLoading={isFetching} data={chartData} />
{stagedQuery && (
<ActionsWrapper>
<ExportPanel

View File

@@ -10,9 +10,11 @@ import {
} from 'antd';
import InputComponent from 'components/Input';
import TimePreference from 'components/TimePreferenceDropDown';
import { QueryParams } from 'constants/query';
import { PANEL_TYPES } from 'constants/queryBuilder';
import ROUTES from 'constants/routes';
import GraphTypes from 'container/NewDashboard/ComponentsSlider/menuItems';
import useCreateAlerts from 'hooks/queryBuilder/useCreateAlerts';
import history from 'lib/history';
import { Dispatch, SetStateAction, useCallback } from 'react';
import { Widgets } from 'types/api/dashboard/getAll';
@@ -53,7 +55,15 @@ function RightContainer({
const selectedGraphType =
GraphTypes.find((e) => e.name === selectedGraph)?.display || '';
const onCreateAlertsHandler = useCreateAlerts(selectedWidget);
const onCreateAlertsHandler = useCallback(() => {
if (!selectedWidget) return;
history.push(
`${ROUTES.ALERTS_NEW}?${QueryParams.compositeQuery}=${encodeURIComponent(
JSON.stringify(selectedWidget?.query),
)}`,
);
}, [selectedWidget]);
const allowThreshold = panelTypeVsThreshold[selectedGraph];

View File

@@ -1,24 +0,0 @@
## Install otel-collector in your Kubernetes infra
&nbsp;
Add the SigNoz Helm Chart repository
```bash
helm repo add signoz https://charts.signoz.io
```
&nbsp;
If the chart is already present, update the chart to the latest using:
```bash
helm repo update
```
&nbsp;
Install the Kubernetes Infrastructure chart provided by SigNoz
```bash
helm install my-release signoz/k8s-infra \
--set otelCollectorEndpoint=ingest.{{REGION}}.signoz.cloud:443 \
--set otelInsecure=false \
--set signozApiKey={{SIGNOZ_INGESTION_KEY}} \
--set global.clusterName=<CLUSTER_NAME>
```
- Replace `<CLUSTER_NAME>` with the name of the Kubernetes cluster or a unique identifier of the cluster.

View File

@@ -1,65 +0,0 @@
After setting up the OTel Collector agent, follow the steps below to instrument your .NET application.
&nbsp;
&nbsp;
### Step 1: Install OpenTelemetry Dependencies
Install the following dependencies in your application.
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - It is the name of your service.
`otlpOptions.Endpoint` - It is the endpoint for your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```bash
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
otlpOptions.Endpoint = new Uri("http://localhost:4317");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
The OpenTelemetry.Exporter.Options get or set the target to which the exporter is going to send traces. Here, we're configuring it to send traces to the OTel Collector agent. The target must be a valid URI with the scheme (http or https) and host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@@ -1,10 +0,0 @@
&nbsp;
To run your .NET application, use the commands below:
```bash
dotnet build
dotnet run
```
Once your .NET application is running, interact with it to generate some load and see it appear in the SigNoz UI.

View File

@@ -1,71 +0,0 @@
### Step 1: Install OpenTelemetry Dependencies
Dependencies related to OpenTelemetry exporter and SDK have to be installed first.
Run the below commands after navigating to the application source folder:
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - It is the name of your service.
`otlpOptions.Endpoint` - It is the endpoint for your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```bash
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
// SigNoz Cloud endpoint
otlpOptions.Endpoint = new Uri("https://ingest.{{REGION}}.signoz.cloud:443");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
//SigNoz Cloud account Ingestion key
string headerKey = "signoz-access-token";
string headerValue = "{{SIGNOZ_INGESTION_KEY}}";
string formattedHeader = $"{headerKey}={headerValue}";
otlpOptions.Headers = formattedHeader;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The OpenTelemetry.Exporter.Options get or set the target to which the exporter is going to send traces. Here, we're configuring it to send traces to SigNoz Cloud. The target must be a valid URI with the scheme (http or https) and host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@@ -1,10 +0,0 @@
&nbsp;
To run your .NET application, use the commands below:
```bash
dotnet build
dotnet run
```
Once your .NET application is running, interact with it to generate some load and see it appear in the SigNoz UI.

View File

@@ -1,98 +0,0 @@
## Setup OpenTelemetry Binary as an agent
&nbsp;
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_amd64.tar.gz
```
&nbsp;
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_amd64.tar.gz -C otelcol-contrib
```
&nbsp;
### Step 3: Create `config.yaml` in `otelcol-contrib` folder with the below content in it
```bash
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```

View File

@@ -1,67 +0,0 @@
After setting up the OTel Collector agent, follow the steps below to instrument your .NET application.
&nbsp;
&nbsp;
### Step 1: Install OpenTelemetry Dependencies
Install the following dependencies in your application.
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - It is the name of your service.
`otlpOptions.Endpoint` - It is the endpoint for your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```bash
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
otlpOptions.Endpoint = new Uri("http://localhost:4317");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The OpenTelemetry.Exporter.Options get or set the target to which the exporter is going to send traces. Here, we're configuring it to send traces to the OTel Collector agent. The target must be a valid URI with the scheme (http or https) and host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@@ -1,18 +0,0 @@
&nbsp;
Once you are done instrumenting your .NET application, you can run it using the commands below.
&nbsp;
### Step 1: Run OTel Collector
Run this command inside the `otelcol-contrib` directory that you created in the Install OTel Collector step:
```bash
./otelcol-contrib --config ./config.yaml
```
&nbsp;
### Step 2: Run your .NET application
```bash
dotnet build
dotnet run
```

View File

@@ -1,70 +0,0 @@
### Step 1: Install OpenTelemetry Dependencies
Dependencies related to OpenTelemetry exporter and SDK have to be installed first.
Run the below commands after navigating to the application source folder:
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - It is the name of your service.
`otlpOptions.Endpoint` - It is the endpoint for your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```bash
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
// SigNoz Cloud endpoint
otlpOptions.Endpoint = new Uri("https://ingest.{{REGION}}.signoz.cloud:443");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
//SigNoz Cloud account Ingestion key
string headerKey = "signoz-access-token";
string headerValue = "{{SIGNOZ_INGESTION_KEY}}";
string formattedHeader = $"{headerKey}={headerValue}";
otlpOptions.Headers = formattedHeader;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The OpenTelemetry.Exporter.Options get or set the target to which the exporter is going to send traces. Here, we're configuring it to send traces to SigNoz Cloud. The target must be a valid URI with the scheme (http or https) and host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@@ -1,10 +0,0 @@
&nbsp;
To run your .NET application, use the commands below:
```bash
dotnet build
dotnet run
```
Once your .NET application is running, interact with it to generate some load and see it appear in the SigNoz UI.

View File

@@ -1,99 +0,0 @@
## Setup OpenTelemetry Binary as an agent
&nbsp;
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_arm64.tar.gz
```
&nbsp;
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_arm64.tar.gz -C otelcol-contrib
```
&nbsp;
### Step 3: Create `config.yaml` in `otelcol-contrib` folder with the below content in it
```bash
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```

View File

@@ -1,68 +0,0 @@
After setting up the OTel Collector agent, follow the steps below to instrument your .NET application.
&nbsp;
&nbsp;
### Step 1: Install OpenTelemetry Dependencies
Install the following dependencies in your application.
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - It is the name of your service.
`otlpOptions.Endpoint` - It is the endpoint for your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```bash
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
otlpOptions.Endpoint = new Uri("http://localhost:4317");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The OpenTelemetry.Exporter.Options get or set the target to which the exporter is going to send traces. Here, we're configuring it to send traces to the OTel Collector agent. The target must be a valid URI with the scheme (http or https) and host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@@ -1,18 +0,0 @@
&nbsp;
Once you are done instrumenting your .NET application, you can run it using the commands below.
&nbsp;
### Step 1: Run OTel Collector
Run this command inside the `otelcol-contrib` directory that you created in the Install OTel Collector step:
```bash
./otelcol-contrib --config ./config.yaml
```
&nbsp;
### Step 2: Run your .NET application
```bash
dotnet build
dotnet run
```

View File

@@ -1,70 +0,0 @@
### Step 1: Install OpenTelemetry Dependencies
Dependencies related to OpenTelemetry exporter and SDK have to be installed first.
Run the below commands after navigating to the application source folder:
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - It is the name of your service.
`otlpOptions.Endpoint` - It is the endpoint for your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```bash
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
// SigNoz Cloud endpoint
otlpOptions.Endpoint = new Uri("https://ingest.{{REGION}}.signoz.cloud:443");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
//SigNoz Cloud account Ingestion key
string headerKey = "signoz-access-token";
string headerValue = "{{SIGNOZ_INGESTION_KEY}}";
string formattedHeader = $"{headerKey}={headerValue}";
otlpOptions.Headers = formattedHeader;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The OpenTelemetry.Exporter.Options get or set the target to which the exporter is going to send traces. Here, we're configuring it to send traces to SigNoz Cloud. The target must be a valid URI with the scheme (http or https) and host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@@ -1,10 +0,0 @@
&nbsp;
To run your .NET application, use the commands below:
```bash
dotnet build
dotnet run
```
Once your .NET application is running, interact with it to generate some load and see it appear in the SigNoz UI.

View File

@@ -1,97 +0,0 @@
## Setup OpenTelemetry Binary as an agent
&nbsp;
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_amd64.tar.gz
```
&nbsp;
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_amd64.tar.gz -C otelcol-contrib
```
&nbsp;
### Step 3: Create `config.yaml` in folder `otelcol-contrib` with the below content in it
```bash
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```

View File

@@ -1,67 +0,0 @@
After setting up the OTel Collector agent, follow the steps below to instrument your .NET application.
&nbsp;
&nbsp;
### Step 1: Install OpenTelemetry Dependencies
Install the following dependencies in your application.
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - It is the name of your service.
`otlpOptions.Endpoint` - It is the endpoint for your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```bash
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
otlpOptions.Endpoint = new Uri("http://localhost:4317");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The OpenTelemetry.Exporter.Options get or set the target to which the exporter is going to send traces. Here, we're configuring it to send traces to the OTel Collector agent. The target must be a valid URI with the scheme (http or https) and host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@@ -1,18 +0,0 @@
&nbsp;
Once you are done instrumenting your .NET application, you can run it using the commands below.
&nbsp;
### Step 1: Run OTel Collector
Run this command inside the `otelcol-contrib` directory that you created in the Install OTel Collector step:
```bash
./otelcol-contrib --config ./config.yaml
```
&nbsp;
### Step 2: Run your .NET application
```bash
dotnet build
dotnet run
```

View File

@@ -1,70 +0,0 @@
### Step 1: Install OpenTelemetry Dependencies
Dependencies related to OpenTelemetry exporter and SDK have to be installed first.
Run the below commands after navigating to the application source folder:
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - It is the name of your service.
`otlpOptions.Endpoint` - It is the endpoint for your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```bash
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
// SigNoz Cloud endpoint
otlpOptions.Endpoint = new Uri("https://ingest.{{REGION}}.signoz.cloud:443");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
//SigNoz Cloud account Ingestion key
string headerKey = "signoz-access-token";
string headerValue = "{{SIGNOZ_INGESTION_KEY}}";
string formattedHeader = $"{headerKey}={headerValue}";
otlpOptions.Headers = formattedHeader;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The OpenTelemetry.Exporter.Options get or set the target to which the exporter is going to send traces. Here, we're configuring it to send traces to SigNoz Cloud. The target must be a valid URI with the scheme (http or https) and host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.

View File

@@ -1,10 +0,0 @@
&nbsp;
To run your .NET application, use the commands below:
```bash
dotnet build
dotnet run
```
Once your .NET application is running, interact with it to generate some load and see it appear in the SigNoz UI.

View File

@@ -1,98 +0,0 @@
## Setup OpenTelemetry Binary as an agent
&nbsp;
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_arm64.tar.gz
```
&nbsp;
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_arm64.tar.gz -C otelcol-contrib
```
&nbsp;
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the content below
```bash
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
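&nbsp;
Optionally, before starting the collector you can ask the binary to check the configuration; recent `otelcol-contrib` releases ship a `validate` subcommand (skip this if your version does not have it):
```bash
# Run from inside the otelcol-contrib directory; a non-zero exit code means the config is invalid.
./otelcol-contrib validate --config ./config.yaml
```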

View File

@@ -1,68 +0,0 @@
After setting up the OTel Collector agent, follow the steps below to instrument your .NET application.
&nbsp;
&nbsp;
### Step 1: Install OpenTelemetry Dependencies
Install the following dependencies in your application.
```bash
dotnet add package OpenTelemetry
dotnet add package OpenTelemetry.Exporter.OpenTelemetryProtocol
dotnet add package OpenTelemetry.Extensions.Hosting
dotnet add package OpenTelemetry.Instrumentation.Runtime
dotnet add package OpenTelemetry.Instrumentation.AspNetCore
dotnet add package OpenTelemetry.AutoInstrumentation
```
&nbsp;
### Step 2: Adding OpenTelemetry as a service and configuring exporter options
In your `Program.cs` file, add OpenTelemetry as a service. Here, we are configuring these variables:
`serviceName` - the name of your service.
`otlpOptions.Endpoint` - the endpoint of your OTel Collector agent.
&nbsp;
Here's a sample `Program.cs` file with the configured variables:
```bash
using System.Diagnostics;
using OpenTelemetry.Exporter;
using OpenTelemetry.Resources;
using OpenTelemetry.Trace;
var builder = WebApplication.CreateBuilder(args);
// Configure OpenTelemetry with tracing and auto-start.
builder.Services.AddOpenTelemetry()
.ConfigureResource(resource =>
resource.AddService(serviceName: "{{MYAPP}}"))
.WithTracing(tracing => tracing
.AddAspNetCoreInstrumentation()
.AddOtlpExporter(otlpOptions =>
{
otlpOptions.Endpoint = new Uri("http://localhost:4317");
otlpOptions.Protocol = OtlpExportProtocol.Grpc;
}));
var app = builder.Build();
//The index route ("/") is set up to write out the OpenTelemetry trace information on the response:
app.MapGet("/", () => $"Hello World! OpenTelemetry Trace: {Activity.Current?.Id}");
app.Run();
```
&nbsp;
The OTLP exporter options set the target to which the exporter sends traces. Here, we're configuring it to send traces to the OTel Collector agent. The target must be a valid URI with a scheme (http or https) and a host, and may contain a port and a path.
This is done by configuring an OpenTelemetry [TracerProvider](https://github.com/open-telemetry/opentelemetry-dotnet/tree/main/docs/trace/customizing-the-sdk#readme) using extension methods and setting it to auto-start when the host is started.
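&nbsp;
Before running the application, it can help to confirm that the collector from the previous step is actually listening on the OTLP/gRPC port. One way to check, assuming `nc` (netcat) is available:
```bash
# Exits 0 if something is listening on localhost:4317 (the endpoint configured above).
nc -z localhost 4317 && echo "collector reachable" || echo "collector not reachable"
```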

View File

@@ -1,18 +0,0 @@
&nbsp;
Once you are done instrumenting your .NET application, you can run it using the commands below.
&nbsp;
### Step 1: Run OTel Collector
Run this command inside the `otelcol-contrib` directory that you created in the Install OTel Collector step.
```bash
./otelcol-contrib --config ./config.yaml
```
&nbsp;
### Step 2: Run your .NET application
```bash
dotnet build
dotnet run
```

View File

@@ -37,8 +37,7 @@ To configure your application to send data we will need a function to initialize
import (
.....
"google.golang.org/grpc/credentials"
"github.com/gin-gonic/gin"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"

View File

@@ -38,7 +38,6 @@ To configure your application to send data we will need a function to initialize
import (
.....
"google.golang.org/grpc/credentials"
"github.com/gin-gonic/gin"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"

View File

@@ -38,7 +38,6 @@ To configure your application to send data we will need a function to initialize
import (
.....
"google.golang.org/grpc/credentials"
"github.com/gin-gonic/gin"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"

View File

@@ -38,7 +38,6 @@ To configure your application to send data we will need a function to initialize
import (
.....
"google.golang.org/grpc/credentials"
"github.com/gin-gonic/gin"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"

View File

@@ -38,7 +38,6 @@ To configure your application to send data we will need a function to initialize
import (
.....
"google.golang.org/grpc/credentials"
"github.com/gin-gonic/gin"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"

View File

@@ -38,7 +38,6 @@ To configure your application to send data we will need a function to initialize
import (
.....
"google.golang.org/grpc/credentials"
"github.com/gin-gonic/gin"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"

View File

@@ -38,7 +38,6 @@ To configure your application to send data we will need a function to initialize
import (
.....
"google.golang.org/grpc/credentials"
"github.com/gin-gonic/gin"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"

View File

@@ -38,7 +38,6 @@ To configure your application to send data we will need a function to initialize
import (
.....
"google.golang.org/grpc/credentials"
"github.com/gin-gonic/gin"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"

View File

@@ -38,7 +38,6 @@ To configure your application to send data we will need a function to initialize
import (
.....
"google.golang.org/grpc/credentials"
"github.com/gin-gonic/gin"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"

View File

@@ -1,39 +0,0 @@
### Configure AWS
Create a `~/.aws/credentials` file on the machine, with `aws_access_key_id` and `aws_secret_access_key` set in the `default` section of the credentials file.
An example credentials file looks like this:
```bash
[default]
aws_access_key_id=AKIAIOSFODNN7EXAMPLE
aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
[user1]
aws_access_key_id=AKIAI44QH8DHBEXAMPLE
aws_secret_access_key=je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY
```
**Note:** Replace the `aws_access_key_id` and `aws_secret_access_key` values with your own credentials.
&nbsp;
The account corresponding to these credentials should have the **below-mentioned AWS Identity and Access Management (IAM)** policy. This allows describing and filtering log events across all log groups of that particular AWS account, a crucial step for forwarding CloudWatch logs to SigNoz.
```bash
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"logs:DescribeLogGroups",
"logs:FilterLogEvents"
],
"Resource": "arn:aws:logs:*:090340947446:log-group:*"
}
]
}
```
**Important Note:** Make sure you have AWS configured on the machine where otel-collector is running.
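&nbsp;
If you have the AWS CLI installed, a quick sanity check that the credentials resolve and that the policy grants access to CloudWatch Logs might look like this (the region below is only an example; use the region your log groups live in):
```bash
# Confirms which identity the credentials resolve to,
# then lists a few log groups to exercise the logs:DescribeLogGroups permission.
aws sts get-caller-identity
aws logs describe-log-groups --region us-east-1 --max-items 5
```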

View File

@@ -1,50 +0,0 @@
### Configure awscloudwatch receiver
Add the `awscloudwatch` receiver to the receivers section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
&nbsp;
You can configure your receiver to collect logs with different conditions.
&nbsp;
Here are two sample configurations:
- The configuration below uses autodiscovery to collect up to 100 log groups whose names start with the prefix `application`.
```bash
receivers:
...
awscloudwatch:
region: us-east-1
logs:
poll_interval: 1m
groups:
autodiscover:
limit: 100
prefix: application
...
```
- The configuration below does not use autodiscovery and instead specifies the names of the log groups to collect.
```bash
receivers:
...
awscloudwatch:
profile: 'my-profile'
region: us-west-1
logs:
poll_interval: 5m
groups:
named:
/aws/eks/dev-0/cluster:
...
```
&nbsp;
To learn more about the parameters of the awscloudwatch receiver and to see more sample configurations, check out this [GitHub link](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/awscloudwatchreceiver).

View File

@@ -1,94 +0,0 @@
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_amd64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_amd64.tar.gz -C otelcol-contrib
```
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the content below
```bash
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
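&nbsp;
Once the collector is running, the `health_check` extension configured above exposes a small HTTP endpoint (on port 13133 by default) that you can poll to confirm the process is up:
```bash
# Should print 200 while the collector is healthy.
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:13133/
```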

View File

@@ -1,38 +0,0 @@
### Send logs to SigNoz
To test out the receiver, update the logs pipeline in the pipelines section of the `config.yaml` in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
```bash
...
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp, awscloudwatch]
processors: [batch]
exporters: [otlp]
```
&nbsp;
### Run OTel Collector
Run this command inside the `otelcol-contrib` directory:
```bash
./otelcol-contrib --config ./config.yaml
```
You should now be able to see your CloudWatch logs in the Logs tab of the SigNoz Cloud UI.
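&nbsp;
If the logs do not show up, running the collector in the foreground with its own log level raised to debug usually surfaces credential or permission errors from the awscloudwatch receiver. One way to do that without editing `config.yaml`, assuming your collector release supports the `--set` override flag:
```bash
# --set overrides a single config property from the command line.
./otelcol-contrib --config ./config.yaml --set=service.telemetry.logs.level=debug
```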

View File

@@ -1,39 +0,0 @@
### Configure AWS
Create a `~/.aws/credentials` file on the machine, with `aws_access_key_id` and `aws_secret_access_key` set in the `default` section of the credentials file.
An example credentials file looks like this:
```bash
[default]
aws_access_key_id=AKIAIOSFODNN7EXAMPLE
aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
[user1]
aws_access_key_id=AKIAI44QH8DHBEXAMPLE
aws_secret_access_key=je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY
```
**Note:** Replace the `aws_access_key_id` and `aws_secret_access_key` values with your own credentials.
&nbsp;
The account corresponding to these credentials should have the **below-mentioned AWS Identity and Access Management (IAM)** policy. This allows describing and filtering log events across all log groups of that particular AWS account, a crucial step for forwarding CloudWatch logs to SigNoz.
```bash
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"logs:DescribeLogGroups",
"logs:FilterLogEvents"
],
"Resource": "arn:aws:logs:*:090340947446:log-group:*"
}
]
}
```
**Important Note:** Make sure you have AWS configured on the machine where otel-collector is running.

View File

@@ -1,50 +0,0 @@
### Configure awscloudwatch receiver
Add the `awscloudwatch` receiver to the receivers section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
&nbsp;
You can configure your receiver to collect logs with different conditions.
&nbsp;
Here are two sample configurations:
- The configuration below uses autodiscovery to collect up to 100 log groups whose names start with the prefix `application`.
```bash
receivers:
...
awscloudwatch:
region: us-east-1
logs:
poll_interval: 1m
groups:
autodiscover:
limit: 100
prefix: application
...
```
- The configuration below does not use autodiscovery and instead specifies the names of the log groups to collect.
```bash
receivers:
...
awscloudwatch:
profile: 'my-profile'
region: us-west-1
logs:
poll_interval: 5m
groups:
named:
/aws/eks/dev-0/cluster:
...
```
&nbsp;
To learn more about the parameters of the awscloudwatch receiver and to see more sample configurations, check out this [GitHub link](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/awscloudwatchreceiver).

View File

@@ -1,95 +0,0 @@
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_linux_arm64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_linux_arm64.tar.gz -C otelcol-contrib
```
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the content below
```bash
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
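&nbsp;
The `service.telemetry.metrics` address in the config above exposes the collector's own metrics in Prometheus format, which is a quick way to confirm the receivers and exporters are moving data:
```bash
# Receiver/exporter counters include accepted and sent log records.
curl -s http://localhost:8888/metrics | grep -E "receiver|exporter" | head -20
```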

View File

@@ -1,38 +0,0 @@
### Send logs to SigNoz
To test out the receiver, update the logs pipeline in the pipelines section of the `config.yaml` in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
```bash
...
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp, awscloudwatch]
processors: [batch]
exporters: [otlp]
```
&nbsp;
### Run OTel Collector
Run this command inside the `otelcol-contrib` directory:
```bash
./otelcol-contrib --config ./config.yaml
```
You should now be able to see your CloudWatch logs in the Logs tab of the SigNoz Cloud UI.

View File

@@ -1,39 +0,0 @@
### Configure AWS
Create a `~/.aws/credentials` file on the machine, with `aws_access_key_id` and `aws_secret_access_key` set in the `default` section of the credentials file.
An example credentials file looks like this:
```bash
[default]
aws_access_key_id=AKIAIOSFODNN7EXAMPLE
aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
[user1]
aws_access_key_id=AKIAI44QH8DHBEXAMPLE
aws_secret_access_key=je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY
```
**Note:** Replace the `aws_access_key_id` and `aws_secret_access_key` values with your own credentials.
&nbsp;
The account corresponding to these credentials should have the **below-mentioned AWS Identity and Access Management (IAM)** policy. This allows describing and filtering log events across all log groups of that particular AWS account, a crucial step for forwarding CloudWatch logs to SigNoz.
```bash
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"logs:DescribeLogGroups",
"logs:FilterLogEvents"
],
"Resource": "arn:aws:logs:*:090340947446:log-group:*"
}
]
}
```
**Important Note:** Make sure you have AWS configured on the machine where otel-collector is running.

View File

@@ -1,50 +0,0 @@
### Configure awscloudwatch receiver
Add the `awscloudwatch` receiver to the receivers section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
&nbsp;
You can configure your receiver to collect logs with different conditions.
&nbsp;
Here are two sample configurations:
- The configuration below uses autodiscovery to collect up to 100 log groups whose names start with the prefix `application`.
```bash
receivers:
...
awscloudwatch:
region: us-east-1
logs:
poll_interval: 1m
groups:
autodiscover:
limit: 100
prefix: application
...
```
- The configuration below does not use autodiscovery and instead specifies the names of the log groups to collect.
```bash
receivers:
...
awscloudwatch:
profile: 'my-profile'
region: us-west-1
logs:
poll_interval: 5m
groups:
named:
/aws/eks/dev-0/cluster:
...
```
&nbsp;
To learn more about the parameters of the awscloudwatch receiver and to see more sample configurations, check out this [GitHub link](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/awscloudwatchreceiver).

View File

@@ -1,94 +0,0 @@
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_amd64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_amd64.tar.gz -C otelcol-contrib
```
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the content below
```bash
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```
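&nbsp;
If you want the collector to keep running after the terminal closes, one common approach is to start it in the background and capture its output in a log file (a launchd or systemd service is the more durable option for long-running hosts):
```bash
# Start the collector detached from the shell and record its process ID.
nohup ./otelcol-contrib --config ./config.yaml > otelcol-output.log 2>&1 &
echo "collector PID: $!"
```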

View File

@@ -1,38 +0,0 @@
### Send logs to SigNoz
To test out the receiver, update the logs pipeline in the pipelines section of the `config.yaml` in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
```bash
...
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp, awscloudwatch]
processors: [batch]
exporters: [otlp]
```
&nbsp;
### Run OTel Collector
Run this command inside the `otelcol-contrib` directory:
```bash
./otelcol-contrib --config ./config.yaml
```
You should now be able to see your CloudWatch logs in the Logs tab of the SigNoz Cloud UI.

View File

@@ -1,39 +0,0 @@
### Configure AWS
Create a `~/.aws/credentials` file on the machine, with `aws_access_key_id` and `aws_secret_access_key` set in the `default` section of the credentials file.
An example credentials file looks like this:
```bash
[default]
aws_access_key_id=AKIAIOSFODNN7EXAMPLE
aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
[user1]
aws_access_key_id=AKIAI44QH8DHBEXAMPLE
aws_secret_access_key=je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY
```
**Note:** Replace the `aws_access_key_id` and `aws_secret_access_key` values with your own credentials.
&nbsp;
The account corresponding to these credentials should have the **below-mentioned AWS Identity and Access Management (IAM)** policy. This allows describing and filtering log events across all log groups of that particular AWS account, a crucial step for forwarding CloudWatch logs to SigNoz.
```bash
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"logs:DescribeLogGroups",
"logs:FilterLogEvents"
],
"Resource": "arn:aws:logs:*:090340947446:log-group:*"
}
]
}
```
**Important Note:** Make sure you have AWS configured on the machine where otel-collector is running.

View File

@@ -1,50 +0,0 @@
### Configure awscloudwatch receiver
Add the `awscloudwatch` receiver to the receivers section of the `config.yaml` file in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
&nbsp;
You can configure your receiver to collect logs with different conditions.
&nbsp;
Here are two sample configurations:
- The configuration below uses autodiscovery to collect up to 100 log groups whose names start with the prefix `application`.
```bash
receivers:
...
awscloudwatch:
region: us-east-1
logs:
poll_interval: 1m
groups:
autodiscover:
limit: 100
prefix: application
...
```
- The configuration below does not use autodiscovery and instead specifies the names of the log groups to collect.
```bash
receivers:
...
awscloudwatch:
profile: 'my-profile'
region: us-west-1
logs:
poll_interval: 5m
groups:
named:
/aws/eks/dev-0/cluster:
...
```
&nbsp;
To learn more about the parameters of the awscloudwatch receiver and to see more sample configurations, check out this [GitHub link](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/awscloudwatchreceiver).

View File

@@ -1,93 +0,0 @@
### Setup OpenTelemetry Binary as an agent
### Step 1: Download otel-collector tar.gz
```bash
wget https://github.com/open-telemetry/opentelemetry-collector-releases/releases/download/v0.79.0/otelcol-contrib_0.79.0_darwin_arm64.tar.gz
```
### Step 2: Extract otel-collector tar.gz to the `otelcol-contrib` folder
```bash
mkdir otelcol-contrib && tar xvzf otelcol-contrib_0.79.0_darwin_arm64.tar.gz -C otelcol-contrib
```
### Step 3: Create `config.yaml` in the `otelcol-contrib` folder with the content below
```bash
receivers:
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
hostmetrics:
collection_interval: 60s
scrapers:
cpu: {}
disk: {}
load: {}
filesystem: {}
memory: {}
network: {}
paging: {}
process:
mute_process_name_error: true
mute_process_exe_error: true
mute_process_io_error: true
processes: {}
prometheus:
config:
global:
scrape_interval: 60s
scrape_configs:
- job_name: otel-collector-binary
static_configs:
- targets:
# - localhost:8888
processors:
batch:
send_batch_size: 1000
timeout: 10s
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
resourcedetection:
detectors: [env, system] # Before system detector, include ec2 for AWS, gcp for GCP and azure for Azure.
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
timeout: 2s
system:
hostname_sources: [os] # alternatively, use [dns,os] for setting FQDN as host.name and os as fallback
extensions:
health_check: {}
zpages: {}
exporters:
otlp:
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
tls:
insecure: false
headers:
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
logging:
verbosity: normal
service:
telemetry:
metrics:
address: 0.0.0.0:8888
extensions: [health_check, zpages]
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
```

View File

@@ -1,38 +0,0 @@
### Send logs to SigNoz
To test out the receiver, update the logs pipeline in the pipelines section of the `config.yaml` in the **`otelcol-contrib`** directory that you created in the Setup OTel Collector step.
```bash
...
pipelines:
metrics:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
metrics/internal:
receivers: [prometheus, hostmetrics]
processors: [resourcedetection, batch]
exporters: [otlp]
traces:
receivers: [otlp]
processors: [batch]
exporters: [otlp]
logs:
receivers: [otlp, awscloudwatch]
processors: [batch]
exporters: [otlp]
```
&nbsp;
### Run OTel Collector
Run this command inside the `otelcol-contrib` directory:
```bash
./otelcol-contrib --config ./config.yaml
```
You should now be able to see your CloudWatch logs in the Logs tab of the SigNoz Cloud UI.

View File

@@ -1,29 +0,0 @@
You can stream logs from Heroku to SigNoz using [HTTPS drains](https://devcenter.heroku.com/articles/log-drains#https-drains).
&nbsp;
### Use the Heroku CLI to add an HTTPS drain
```bash
heroku drains:add https://<TENANT_NAME>:{{SIGNOZ_INGESTION_KEY}}@ingest.{{REGION}}.signoz.cloud:443/logs/heroku -a <YOUR_APP_NAME>
```
&nbsp;
`<TENANT_NAME>` should be replaced with the name of your SigNoz instance.
For example, if your SigNoz instance URL is `https://cpvo-test.us.signoz.cloud` the `TENANT_NAME` is `cpvo-test`.
**Note:** You can find your instance URL in your browser's current tab address bar or in the invite email sent to you.
&nbsp;
`<YOUR_APP_NAME>` is the name of the Heroku application where you want to add the drain.
&nbsp;
Once you have successfully added the drain, click on the `Done` button below to see your logs in the SigNoz UI.
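&nbsp;
You can also confirm the drain was registered from the CLI; the output should include the SigNoz URL you just added:
```bash
# Lists all log drains attached to the application.
heroku drains -a <YOUR_APP_NAME>
```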

View File

@@ -1,95 +0,0 @@
## Payload Structure
To send logs to SigNoz over HTTP, the payload is an array of JSON log records that adheres to the [OTEL Logs Data Model](https://opentelemetry.io/docs/specs/otel/logs/data-model/).
&nbsp;
The payload has the following fields:
```bash
[
{
"timestamp": <uint64>,
"trace_id": <hex string>,
"span_id": <hex string>,
"trace_flags": <int>,
"severity_text": <string>,
"severity_number": <int>,
"attributes": <map>,
"resources": <map>,
"body": <string>,
}
]
```
**Notes:**
* `timestamp` is a uint64 representing nanoseconds since the Unix epoch.
* You can use **body** or **message** to denote the log content.
&nbsp;
Any other fields present apart from the ones mentioned above will be moved to the **attributes map**. For example:
```bash
[
{
"host": "myhost",
"method": "GET",
"body": "this is a log line"
}
]
```
Will be treated as:
```bash
[
{
"attributes": {
"host": "myhost",
"method": "GET"
},
"body": "this is a log line"
}
]
```
&nbsp;
## Send logs
This is a **sample cURL request** which can be used as a template:
&nbsp;
```bash
curl --location 'https://ingest.{{REGION}}.signoz.cloud:443/logs/json/' \
--header 'Content-Type: application/json' \
--header 'signoz-access-token: {{SIGNOZ_INGESTION_KEY}}' \
--data '[
{
"trace_id": "000000000000000018c51935df0b93b9",
"span_id": "18c51935df0b93b9",
"trace_flags": 0,
"severity_text": "info",
"severity_number": 4,
"attributes": {
"method": "GET",
"path": "/api/users"
},
"resources": {
"host": "myhost",
"namespace": "prod"
},
"message": "This is a log line"
}
]'
```
&nbsp;
This cURL request will use the timestamp of the moment you send the log.
&nbsp;
To specify a particular timestamp in your log, ensure you include the `timestamp` field in your cURL request. Place the timestamp field before the `trace_id` field. For example, `timestamp`: 1698310066000000000
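&nbsp;
If you need the current time in that format, GNU `date` on Linux can print nanoseconds since the epoch directly; on macOS the `%N` specifier is not supported, so the Python one-liner below is a portable fallback:
```bash
# Current Unix time in nanoseconds (GNU date, Linux).
date +%s%N
# Portable fallback using Python 3.7+.
python3 -c 'import time; print(time.time_ns())'
```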
&nbsp;
**Note:** You can customize the cURL request as needed for your specific use case.

View File

@@ -1,50 +0,0 @@
You can stream logs from Vercel to SigNoz using [log drains](https://vercel.com/docs/observability/log-drains-overview/log-drains#configure-a-log-drain).
**Note:** Log Drains are only supported in **Vercel Pro** and **Enterprise accounts**.
&nbsp;
### Step 1: Select Sources
* From the Vercel dashboard, go to **Team Settings > Log Drains**.
&nbsp;
* Select the sources from which you want to collect logs (for example, Static, External, Lambda, etc.)
&nbsp;
* Choose `JSON` as the delivery format
&nbsp;
* Specify your target projects
&nbsp;
### Step 2: Add Log Drain
* Enter the endpoint URL as follows:
```bash
https://ingest.{{REGION}}.signoz.cloud:443/logs/json
```
&nbsp;
* Enable **Custom Headers** and add the headers `signoz-access-token` and `x-vercel-verify`
```bash
signoz-access-token: {{SIGNOZ_INGESTION_KEY}}
```
```bash
x-vercel-verify: <YOUR_VERCEL_VERIFY_TOKEN>
```
**Note:** The value of `x-vercel-verify` will be visible on your screen in the endpoint section.
&nbsp;
* Click on **Verify** button and then **Add Log Drain** button in Vercel.
&nbsp;
Click on the **Done** button below and you should be able to see your logs in SigNoz.

View File

@@ -67,6 +67,7 @@ export default function MarkdownStep(): JSX.Element {
} else if (selectedModule?.id === ModulesMap.InfrastructureMonitoring) {
docFilePaths = InfraMonitoringDocFilePaths;
}
// @ts-ignore
if (docFilePaths && docFilePaths[path]) {
// @ts-ignore

View File

@@ -46,11 +46,6 @@ export default function SelectMethod(): JSX.Element {
<div>
<Radio.Group onChange={onChange} value={value}>
<Space direction="vertical">
<Radio value={OnboardingMethods.QUICK_START}>
<Typography.Text> Quick Start </Typography.Text> <br />
<small>Send data to SigNoz directly from OpenTelemetry SDK.</small>
</Radio>
<Radio value={OnboardingMethods.RECOMMENDED_STEPS}>
<Typography.Text> Use Recommended Steps </Typography.Text> <br />
<small>
@@ -58,6 +53,11 @@ export default function SelectMethod(): JSX.Element {
you send to SigNoz, collect host metrics & logs).
</small>
</Radio>
<Radio value={OnboardingMethods.QUICK_START}>
<Typography.Text> Quick Start </Typography.Text> <br />
<small>Send data to SigNoz directly from OpenTelemetry SDK.</small>
</Radio>
</Space>
</Radio.Group>
</div>

View File

@@ -7,40 +7,6 @@
/// ////// JavaScript Done
/// ///// Go Start
// Go-Kubernetes
/// /// ROR Done
/// /// .NET Start
// dotnet-Kubernetes
import APM_dotnet_kubernetes_recommendedSteps_setupOtelCollector from '../Modules/APM/Dotnet/md-docs/Kubernetes/dotnet-kubernetes-installOtelCollector.md';
import APM_dotnet_kubernetes_recommendedSteps_instrumentApplication from '../Modules/APM/Dotnet/md-docs/Kubernetes/dotnet-kubernetes-instrumentApplication.md';
import APM_dotnet_kubernetes_recommendedSteps_runApplication from '../Modules/APM/Dotnet/md-docs/Kubernetes/dotnet-kubernetes-runApplication.md';
// dotnet-LinuxAMD64-quickstart
import APM_dotnet_linuxAMD64_quickStart_instrumentApplication from '../Modules/APM/Dotnet/md-docs/LinuxAMD64/QuickStart/dotnet-linuxamd64-quickStart-instrumentApplication.md';
import APM_dotnet_linuxAMD64_quickStart_runApplication from '../Modules/APM/Dotnet/md-docs/LinuxAMD64/QuickStart/dotnet-linuxamd64-quickStart-runApplication.md';
// dotnet-LinuxAMD64-recommended
import APM_dotnet_linuxAMD64_recommendedSteps_setupOtelCollector from '../Modules/APM/Dotnet/md-docs/LinuxAMD64/Recommended/dotnet-linuxamd64-recommended-installOtelCollector.md';
import APM_dotnet_linuxAMD64_recommendedSteps_instrumentApplication from '../Modules/APM/Dotnet/md-docs/LinuxAMD64/Recommended/dotnet-linuxamd64-recommended-instrumentApplication.md';
import APM_dotnet_linuxAMD64_recommendedSteps_runApplication from '../Modules/APM/Dotnet/md-docs/LinuxAMD64/Recommended/dotnet-linuxamd64-recommended-runApplication.md';
// dotnet-LinuxARM64-quickstart
import APM_dotnet_linuxARM64_quickStart_instrumentApplication from '../Modules/APM/Dotnet/md-docs/LinuxARM64/QuickStart/dotnet-linuxarm64-quickStart-instrumentApplication.md';
import APM_dotnet_linuxARM64_quickStart_runApplication from '../Modules/APM/Dotnet/md-docs/LinuxARM64/QuickStart/dotnet-linuxarm64-quickStart-runApplication.md';
// dotnet-LinuxARM64-recommended
import APM_dotnet_linuxARM64_recommendedSteps_setupOtelCollector from '../Modules/APM/Dotnet/md-docs/LinuxARM64/Recommended/dotnet-linuxarm64-recommended-installOtelCollector.md';
import APM_dotnet_linuxARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Dotnet/md-docs/LinuxARM64/Recommended/dotnet-linuxarm64-recommended-instrumentApplication.md';
import APM_dotnet_linuxARM64_recommendedSteps_runApplication from '../Modules/APM/Dotnet/md-docs/LinuxARM64/Recommended/dotnet-linuxarm64-recommended-runApplication.md';
// dotnet-MacOsAMD64-quickstart
import APM_dotnet_macOsAMD64_quickStart_instrumentApplication from '../Modules/APM/Dotnet/md-docs/MacOsAMD64/QuickStart/dotnet-macosamd64-quickStart-instrumentApplication.md';
import APM_dotnet_macOsAMD64_quickStart_runApplication from '../Modules/APM/Dotnet/md-docs/MacOsAMD64/QuickStart/dotnet-macosamd64-quickStart-runApplication.md';
// dotnet-MacOsAMD64-recommended
import APM_dotnet_macOsAMD64_recommendedSteps_setupOtelCollector from '../Modules/APM/Dotnet/md-docs/MacOsAMD64/Recommended/dotnet-macosamd64-recommended-installOtelCollector.md';
import APM_dotnet_macOsAMD64_recommendedSteps_instrumentApplication from '../Modules/APM/Dotnet/md-docs/MacOsAMD64/Recommended/dotnet-macosamd64-recommended-instrumentApplication.md';
import APM_dotnet_macOsAMD64_recommendedSteps_runApplication from '../Modules/APM/Dotnet/md-docs/MacOsAMD64/Recommended/dotnet-macosamd64-recommended-runApplication.md';
// dotnet-MacOsARM64-quickstart
import APM_dotnet_macOsARM64_quickStart_instrumentApplication from '../Modules/APM/Dotnet/md-docs/MacOsARM64/QuickStart/dotnet-macosarm64-quickStart-instrumentApplication.md';
import APM_dotnet_macOsARM64_quickStart_runApplication from '../Modules/APM/Dotnet/md-docs/MacOsARM64/QuickStart/dotnet-macosarm64-quickStart-runApplication.md';
// dotnet-MacOsARM64-recommended
import APM_dotnet_macOsARM64_recommendedSteps_setupOtelCollector from '../Modules/APM/Dotnet/md-docs/MacOsARM64/Recommended/dotnet-macosarm64-recommended-installOtelCollector.md';
import APM_dotnet_macOsARM64_recommendedSteps_instrumentApplication from '../Modules/APM/Dotnet/md-docs/MacOsARM64/Recommended/dotnet-macosarm64-recommended-instrumentApplication.md';
import APM_dotnet_macOsARM64_recommendedSteps_runApplication from '../Modules/APM/Dotnet/md-docs/MacOsARM64/Recommended/dotnet-macosarm64-recommended-runApplication.md';
import APM_go_kubernetes_recommendedSteps_setupOtelCollector from '../Modules/APM/GoLang/md-docs/Kubernetes/golang-kubernetes-installOtelCollector.md';
import APM_go_kubernetes_recommendedSteps_instrumentApplication from '../Modules/APM/GoLang/md-docs/Kubernetes/golang-kubernetes-instrumentApplication.md';
import APM_go_kubernetes_recommendedSteps_runApplication from '../Modules/APM/GoLang/md-docs/Kubernetes/golang-kubernetes-runApplication.md';
@@ -1134,7 +1100,7 @@ export const ApmDocFilePaths = {
APM_rails_linuxARM64_recommendedSteps_instrumentApplication,
APM_rails_linuxARM64_recommendedSteps_runApplication,
// ROR-LinuxARM64-quickstart
// ROR-LinuxARM64-quinestjs
APM_rails_linuxARM64_quickStart_instrumentApplication,
APM_rails_linuxARM64_quickStart_runApplication,
@@ -1155,51 +1121,4 @@ export const ApmDocFilePaths = {
// ROR-MacOsARM64-quickstart
APM_rails_macOsARM64_quickStart_instrumentApplication,
APM_rails_macOsARM64_quickStart_runApplication,
// ------------------------------------------------------------------------------------------------
/// //// ROR Done
/// //// .NET Start
// ROR-Kubernetes
APM_dotnet_kubernetes_recommendedSteps_setupOtelCollector,
APM_dotnet_kubernetes_recommendedSteps_instrumentApplication,
APM_dotnet_kubernetes_recommendedSteps_runApplication,
// ROR-LinuxAMD64-quickstart
APM_dotnet_linuxAMD64_quickStart_instrumentApplication,
APM_dotnet_linuxAMD64_quickStart_runApplication,
// ROR-LinuxAMD64-recommended
APM_dotnet_linuxAMD64_recommendedSteps_setupOtelCollector,
APM_dotnet_linuxAMD64_recommendedSteps_instrumentApplication,
APM_dotnet_linuxAMD64_recommendedSteps_runApplication,
// ROR-LinuxARM64-quickstart
APM_dotnet_linuxARM64_quickStart_instrumentApplication,
APM_dotnet_linuxARM64_quickStart_runApplication,
// ROR-LinuxARM64-recommended
APM_dotnet_linuxARM64_recommendedSteps_setupOtelCollector,
APM_dotnet_linuxARM64_recommendedSteps_instrumentApplication,
APM_dotnet_linuxARM64_recommendedSteps_runApplication,
// ROR-MacOsAMD64-quickstart
APM_dotnet_macOsAMD64_quickStart_instrumentApplication,
APM_dotnet_macOsAMD64_quickStart_runApplication,
// ROR-MacOsAMD64-recommended
APM_dotnet_macOsAMD64_recommendedSteps_setupOtelCollector,
APM_dotnet_macOsAMD64_recommendedSteps_instrumentApplication,
APM_dotnet_macOsAMD64_recommendedSteps_runApplication,
// ROR-MacOsARM64-quickstart
APM_dotnet_macOsARM64_quickStart_instrumentApplication,
APM_dotnet_macOsARM64_quickStart_runApplication,
// ROR-MacOsARM64-recommended
APM_dotnet_macOsARM64_recommendedSteps_setupOtelCollector,
APM_dotnet_macOsARM64_recommendedSteps_instrumentApplication,
APM_dotnet_macOsARM64_recommendedSteps_runApplication,
};

View File

@@ -121,40 +121,6 @@ import LogsManagement_logStash_macOsARM64_setupOtelCollector from '../Modules/Lo
import LogsManagement_logStash_macOsARM64_configureReceiver from '../Modules/LogsManagement/Logstash/md-docs/MacOsARM64/logstash-macosarm64-configureReceiver.md';
import LogsManagement_logStash_macOsARM64_restartOtelCollector from '../Modules/LogsManagement/Logstash/md-docs/MacOsARM64/logstash-macosarm64-restartOtelCollector.md';
// Heroku
import LogsManagement_heroku_addHttpDrain from '../Modules/LogsManagement/Heroku/md-docs/heroku-addHttpDrain.md';
// Vercel
import LogsManagement_vercel_setupLogDrains from '../Modules/LogsManagement/Vercel/md-docs/vercel-setupLogDrains.md';
// HTTP
import LogsManagement_http_createHttpPayload from '../Modules/LogsManagement/Http/md-docs/httpJsonPayload.md';
// Cloudwatch
import LogsManagement_cloudwatch_linuxAMD64_setupOtelCollector from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxAMD64/cloudwatch-linuxamd64-installOtelCollector.md';
import LogsManagement_cloudwatch_linuxAMD64_configureAws from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxAMD64/cloudwatch-linuxamd64-configureAws.md';
import LogsManagement_cloudwatch_linuxAMD64_configureReceiver from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxAMD64/cloudwatch-linuxamd64-configureReceiver.md';
import LogsManagement_cloudwatch_linuxAMD64_sendLogsCloudwatch from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxAMD64/cloudwatch-linuxamd64-sendLogs.md';
import LogsManagement_cloudwatch_linuxARM64_setupOtelCollector from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxARM64/cloudwatch-linuxarm64-installOtelCollector.md';
import LogsManagement_cloudwatch_linuxARM64_configureAws from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxARM64/cloudwatch-linuxarm64-configureAws.md';
import LogsManagement_cloudwatch_linuxARM64_configureReceiver from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxARM64/cloudwatch-linuxarm64-configureReceiver.md';
import LogsManagement_cloudwatch_linuxARM64_sendLogsCloudwatch from '../Modules/LogsManagement/Cloudwatch/md-docs/LinuxARM64/cloudwatch-linuxarm64-sendLogs.md';
import LogsManagement_cloudwatch_macOsAMD64_setupOtelCollector from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsAMD64/cloudwatch-macosamd64-installOtelCollector.md';
import LogsManagement_cloudwatch_macOsAMD64_configureAws from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsAMD64/cloudwatch-macosamd64-configureAws.md';
import LogsManagement_cloudwatch_macOsAMD64_configureReceiver from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsAMD64/cloudwatch-macosamd64-configureReceiver.md';
import LogsManagement_cloudwatch_macOsAMD64_sendLogsCloudwatch from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsAMD64/cloudwatch-macosamd64-sendLogs.md';
import LogsManagement_cloudwatch_macOsARM64_setupOtelCollector from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsARM64/cloudwatch-macosarm64-installOtelCollector.md';
import LogsManagement_cloudwatch_macOsARM64_configureAws from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsARM64/cloudwatch-macosarm64-configureAws.md';
import LogsManagement_cloudwatch_macOsARM64_configureReceiver from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsARM64/cloudwatch-macosarm64-configureReceiver.md';
import LogsManagement_cloudwatch_macOsARM64_sendLogsCloudwatch from '../Modules/LogsManagement/Cloudwatch/md-docs/MacOsARM64/cloudwatch-macosarm64-sendLogs.md';
export const LogsManagementDocFilePaths = {
// Kubernetes Pod Logs
LogsManagement_kubernetes_setupOtelCollector,
@@ -269,36 +235,5 @@ export const LogsManagementDocFilePaths = {
LogsManagement_logStash_macOsARM64_setupOtelCollector,
LogsManagement_logStash_macOsARM64_configureReceiver,
LogsManagement_logStash_macOsARM64_restartOtelCollector,
// Heroku
LogsManagement_heroku_addHttpDrain,
// ------------------------------------------------------------------------------------------------
// Vercel
LogsManagement_vercel_setupLogDrains,
// HTTP
LogsManagement_http_createHttpPayload,
// Cloudwatch
LogsManagement_cloudwatch_linuxAMD64_setupOtelCollector,
LogsManagement_cloudwatch_linuxAMD64_configureAws,
LogsManagement_cloudwatch_linuxAMD64_configureReceiver,
LogsManagement_cloudwatch_linuxAMD64_sendLogsCloudwatch,
LogsManagement_cloudwatch_linuxARM64_setupOtelCollector,
LogsManagement_cloudwatch_linuxARM64_configureAws,
LogsManagement_cloudwatch_linuxARM64_configureReceiver,
LogsManagement_cloudwatch_linuxARM64_sendLogsCloudwatch,
LogsManagement_cloudwatch_macOsAMD64_setupOtelCollector,
LogsManagement_cloudwatch_macOsAMD64_configureAws,
LogsManagement_cloudwatch_macOsAMD64_configureReceiver,
LogsManagement_cloudwatch_macOsAMD64_sendLogsCloudwatch,
LogsManagement_cloudwatch_macOsARM64_setupOtelCollector,
LogsManagement_cloudwatch_macOsARM64_configureAws,
LogsManagement_cloudwatch_macOsARM64_configureReceiver,
LogsManagement_cloudwatch_macOsARM64_sendLogsCloudwatch,
};

View File

@@ -22,11 +22,6 @@ export const stepsMap = {
plotMetrics: 'plotMetrics',
configureHostmetricsJson: 'configureHostmetricsJson',
configureMetricsReceiver: 'configureMetricsReceiver',
addHttpDrain: 'addHttpDrain',
setupLogDrains: `setupLogDrains`,
createHttpPayload: `createHttpPayload`,
configureAws: `configureAws`,
sendLogsCloudwatch: `sendLogsCloudwatch`,
};
export const DataSourceStep: SelectedModuleStepProps = {
@@ -124,32 +119,3 @@ export const ConfigureMetricsReceiver: SelectedModuleStepProps = {
title: 'Configure Metrics Receiver',
component: <MarkdownStep />,
};
export const AddHttpDrain: SelectedModuleStepProps = {
id: stepsMap.addHttpDrain,
title: 'Add HTTP Drain',
component: <MarkdownStep />,
};
export const SetupLogDrains: SelectedModuleStepProps = {
id: stepsMap.setupLogDrains,
title: 'Setup Log Drains',
component: <MarkdownStep />,
};
export const CreateHttpPayload: SelectedModuleStepProps = {
id: stepsMap.createHttpPayload,
title: 'Create Json Payload',
component: <MarkdownStep />,
};
export const ConfigureAws: SelectedModuleStepProps = {
id: stepsMap.configureAws,
title: 'Configure AWS',
component: <MarkdownStep />,
};
export const SendLogsCloudwatch: SelectedModuleStepProps = {
id: stepsMap.sendLogsCloudwatch,
title: 'Send Logs',
component: <MarkdownStep />,
};

View File

@@ -54,7 +54,7 @@ function OnboardingContextProvider({
const [selectedFramework, setSelectedFramework] = useState<string>('');
const [selectedMethod, setSelectedMethod] = useState(
OnboardingMethods.QUICK_START,
OnboardingMethods.RECOMMENDED_STEPS,
);
const [

View File

@@ -94,11 +94,6 @@ const supportedLanguages = [
id: 'rails',
imgURL: `Logos/rails.png`,
},
{
name: '.NET',
id: 'dotnet',
imgURL: `Logos/dotnet.png`,
},
];
export const defaultLogsType = {
@@ -143,26 +138,6 @@ const supportedLogsTypes = [
id: 'logStash',
imgURL: `Logos/logstash.svg`,
},
{
name: 'Heroku',
id: 'heroku',
imgURL: `Logos/heroku.png`,
},
{
name: 'Vercel',
id: 'vercel',
imgURL: `Logos/vercel.png`,
},
{
name: 'HTTP',
id: 'http',
imgURL: `Logos/http.png`,
},
{
name: 'Cloudwatch',
id: 'cloudwatch',
imgURL: `Logos/cloudwatch.png`,
},
];
export const defaultInfraMetricsType = {
@@ -213,8 +188,7 @@ export const getSupportedFrameworks = ({
if (
(moduleID === ModulesMap.APM && dataSourceName === 'go') ||
(moduleID === ModulesMap.APM && dataSourceName === 'rails') ||
(moduleID === ModulesMap.APM && dataSourceName === '.NET')
(moduleID === ModulesMap.APM && dataSourceName === 'rails')
) {
return [];
}
@@ -239,8 +213,7 @@ export const hasFrameworks = ({
moduleID === ModulesMap.LogsManagement ||
moduleID === ModulesMap.InfrastructureMonitoring ||
(moduleID === ModulesMap.APM && dataSourceName === 'go') ||
(moduleID === ModulesMap.APM && dataSourceName === 'rails') ||
(moduleID === ModulesMap.APM && dataSourceName === '.NET')
(moduleID === ModulesMap.APM && dataSourceName === 'rails')
) {
return false;
}

View File

@@ -1,12 +1,9 @@
import {
AddHttpDrain,
CheckServiceStatus,
CloneRepo,
ConfigureAws,
ConfigureHostmetricsJSON,
ConfigureMetricsReceiver,
ConfigureReceiver,
CreateHttpPayload,
DataSourceStep,
EnvDetailsStep,
InstallOpenTelemetryStep,
@@ -15,8 +12,6 @@ import {
RestartOtelCollector,
RunApplicationStep,
SelectMethodStep,
SendLogsCloudwatch,
SetupLogDrains,
SetupOtelCollectorStep,
StartContainer,
TestConnectionStep,
@@ -79,21 +74,6 @@ export const getSteps = ({
ConfigureReceiver,
RestartOtelCollector,
];
case 'heroku':
return [DataSourceStep, AddHttpDrain];
case 'vercel':
return [DataSourceStep, SetupLogDrains];
case 'http':
return [DataSourceStep, CreateHttpPayload];
case 'cloudwatch':
return [
DataSourceStep,
EnvDetailsStep,
SetupOtelCollectorStep,
ConfigureAws,
ConfigureReceiver,
SendLogsCloudwatch,
];
case 'kubernetesInfraMetrics':
return [DataSourceStep, SetupOtelCollectorStep, PlotMetrics];

View File

@@ -201,7 +201,6 @@ function PendingInvitesContainer(): JSX.Element {
email: member.email,
name: member.name,
role: member.role,
frontendBaseUrl: window.location.origin,
});
if (statusCode !== 200) {
@@ -272,21 +271,15 @@ function PendingInvitesContainer(): JSX.Element {
<Space direction="vertical" size="middle">
<TitleWrapper>
<Typography.Title level={3}>{t('pending_invites')}</Typography.Title>
<Space>
<Typography.Text type="warning">
{t('invite_link_share_manually')}
</Typography.Text>
<Button
icon={<PlusOutlined />}
type="primary"
onClick={(): void => {
toggleModal(true);
}}
>
{t('invite_members')}
</Button>
</Space>
<Button
icon={<PlusOutlined />}
type="primary"
onClick={(): void => {
toggleModal(true);
}}
>
{t('invite_members')}
</Button>
</TitleWrapper>
<ResizeTable
columns={columns}

View File

@@ -25,9 +25,7 @@ import { ExtendedSelectOption } from 'types/common/select';
import { popupContainer } from 'utils/selectPopupContainer';
import { transformToUpperCase } from 'utils/transformToUpperCase';
import { removePrefix } from '../GroupByFilter/utils';
import { selectStyle } from '../QueryBuilderSearch/config';
import OptionRenderer from '../QueryBuilderSearch/OptionRenderer';
// ** Types
import { AgregatorFilterProps } from './AggregatorFilter.intefaces';
@@ -66,23 +64,11 @@ export const AggregatorFilter = memo(function AggregatorFilter({
onSuccess: (data) => {
const options: ExtendedSelectOption[] =
data?.payload?.attributeKeys?.map(({ id: _, ...item }) => ({
label: (
<OptionRenderer
label={transformStringWithPrefix({
str: item.key,
prefix: item.type || '',
condition: !item.isColumn,
})}
value={removePrefix(
transformStringWithPrefix({
str: item.key,
prefix: item.type || '',
condition: !item.isColumn,
}),
)}
dataType={item.dataType}
/>
),
label: transformStringWithPrefix({
str: item.key,
prefix: item.type || '',
condition: !item.isColumn,
}),
value: `${item.key}${selectValueDivider}${createIdFromObjectFields(
item,
baseAutoCompleteIdKeysOrder,
@@ -179,19 +165,18 @@ export const AggregatorFilter = memo(function AggregatorFilter({
[getAttributesData, handleChangeCustomValue, onChange],
);
const value = removePrefix(
transformStringWithPrefix({
str: query.aggregateAttribute.key,
prefix: query.aggregateAttribute.type || '',
condition: !query.aggregateAttribute.isColumn,
}),
);
const value = transformStringWithPrefix({
str: query.aggregateAttribute.key,
prefix: query.aggregateAttribute.type || '',
condition: !query.aggregateAttribute.isColumn,
});
return (
<AutoComplete
getPopupContainer={popupContainer}
placeholder={placeholder}
style={selectStyle}
showArrow={false}
filterOption={false}
onSearch={handleSearchText}
notFoundContent={isFetching ? <Spin size="small" /> : null}

View File

@@ -14,16 +14,14 @@ import { chooseAutocompleteFromCustomValue } from 'lib/newQueryBuilder/chooseAut
// ** Helpers
import { transformStringWithPrefix } from 'lib/query/transformStringWithPrefix';
import { isEqual, uniqWith } from 'lodash-es';
import { memo, ReactNode, useCallback, useEffect, useState } from 'react';
import { memo, useCallback, useEffect, useState } from 'react';
import { useQueryClient } from 'react-query';
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
import { SelectOption } from 'types/common/select';
import { popupContainer } from 'utils/selectPopupContainer';
import { selectStyle } from '../QueryBuilderSearch/config';
import OptionRenderer from '../QueryBuilderSearch/OptionRenderer';
import { GroupByFilterProps } from './GroupByFilter.interfaces';
import { removePrefix } from './utils';
export const GroupByFilter = memo(function GroupByFilter({
query,
@@ -32,9 +30,9 @@ export const GroupByFilter = memo(function GroupByFilter({
}: GroupByFilterProps): JSX.Element {
const queryClient = useQueryClient();
const [searchText, setSearchText] = useState<string>('');
const [optionsData, setOptionsData] = useState<
SelectOption<string, ReactNode>[]
>([]);
const [optionsData, setOptionsData] = useState<SelectOption<string, string>[]>(
[],
);
const [localValues, setLocalValues] = useState<SelectOption<string, string>[]>(
[],
);
@@ -63,26 +61,13 @@ export const GroupByFilter = memo(function GroupByFilter({
(attrKey) => !keys.includes(attrKey.key),
) || [];
const options: SelectOption<string, ReactNode>[] =
const options: SelectOption<string, string>[] =
filteredOptions.map((item) => ({
label: (
<OptionRenderer
key={item.key}
label={transformStringWithPrefix({
str: item.key,
prefix: item.type || '',
condition: !item.isColumn,
})}
value={removePrefix(
transformStringWithPrefix({
str: item.key,
prefix: item.type || '',
condition: !item.isColumn,
}),
)}
dataType={item.dataType || ''}
/>
),
label: transformStringWithPrefix({
str: item.key,
prefix: item.type || '',
condition: !item.isColumn,
}),
value: `${transformStringWithPrefix({
str: item.key,
prefix: item.type || '',
@@ -167,13 +152,11 @@ export const GroupByFilter = memo(function GroupByFilter({
useEffect(() => {
const currentValues: SelectOption<string, string>[] = query.groupBy.map(
(item) => ({
label: `${removePrefix(
transformStringWithPrefix({
str: item.key,
prefix: item.type || '',
condition: !item.isColumn,
}),
)}`,
label: `${transformStringWithPrefix({
str: item.key,
prefix: item.type || '',
condition: !item.isColumn,
})}`,
value: `${transformStringWithPrefix({
str: item.key,
prefix: item.type || '',
@@ -193,6 +176,7 @@ export const GroupByFilter = memo(function GroupByFilter({
onSearch={handleSearchKeys}
showSearch
disabled={disabled}
showArrow={false}
filterOption={false}
onBlur={handleBlur}
onFocus={handleFocus}

View File

@@ -1,14 +0,0 @@
import { MetricsType } from 'container/MetricsApplication/constant';
export function removePrefix(str: string): string {
const tagPrefix = `${MetricsType.Tag}_`;
const resourcePrefix = `${MetricsType.Resource}_`;
if (str.startsWith(tagPrefix)) {
return str.slice(tagPrefix.length);
}
if (str.startsWith(resourcePrefix)) {
return str.slice(resourcePrefix.length);
}
return str;
}

View File

@@ -1,45 +0,0 @@
import {
SelectOptionContainer,
TagContainer,
TagLabel,
TagValue,
} from './style';
import { getOptionType } from './utils';
function OptionRenderer({
label,
value,
dataType,
}: OptionRendererProps): JSX.Element {
const optionType = getOptionType(label);
return (
<span>
{optionType ? (
<SelectOptionContainer>
<div>{value}</div>
<div>
<TagContainer>
<TagLabel>Type: </TagLabel>
<TagValue>{optionType}</TagValue>
</TagContainer>
<TagContainer>
<TagLabel>Data type: </TagLabel>
<TagValue>{dataType}</TagValue>
</TagContainer>
</div>
</SelectOptionContainer>
) : (
<span>{label}</span>
)}
</span>
);
}
interface OptionRendererProps {
label: string;
value: string;
dataType: string;
}
export default OptionRenderer;

View File

@@ -28,7 +28,6 @@ import { v4 as uuid } from 'uuid';
import { selectStyle } from './config';
import { PLACEHOLDER } from './constant';
import OptionRenderer from './OptionRenderer';
import { StyledCheckOutlined, TypographyText } from './style';
import {
getOperatorValue,
@@ -206,11 +205,7 @@ function QueryBuilderSearch({
>
{options.map((option) => (
<Select.Option key={option.label} value={option.value}>
<OptionRenderer
label={option.label}
value={option.value}
dataType={option.dataType || ''}
/>
{option.label}
{option.selected && <StyledCheckOutlined />}
</Select.Option>
))}

View File

@@ -1,5 +1,5 @@
import { CheckOutlined } from '@ant-design/icons';
import { Tag, Typography } from 'antd';
import { Typography } from 'antd';
import styled from 'styled-components';
export const TypographyText = styled(Typography.Text)<{
@@ -15,27 +15,3 @@ export const TypographyText = styled(Typography.Text)<{
export const StyledCheckOutlined = styled(CheckOutlined)`
float: right;
`;
export const SelectOptionContainer = styled.div`
display: flex;
justify-content: space-between;
align-items: center;
`;
export const TagContainer = styled(Tag)`
&&& {
border-radius: 0.25rem;
padding: 0.063rem 0.5rem;
font-weight: 600;
font-size: 0.75rem;
line-height: 1.25rem;
}
`;
export const TagLabel = styled.span`
font-weight: 400;
`;
export const TagValue = styled.span`
text-transform: capitalize;
`;

Some files were not shown because too many files have changed in this diff Show More