Mirror of https://github.com/SigNoz/signoz.git, synced 2026-04-16 17:00:28 +01:00

Compare commits: 26 commits, from `infraM/v2_...` to `base-path-...`
Commits (SHA1):

- 5729a4584a
- 825d06249d
- 9034471587
- 4cc23ead6b
- 867e27d45f
- c24579be12
- be37e588f8
- 057dcbe6e4
- 3a28d741a3
- 223e83154f
- 50ae51cdaa
- c8ae8476c3
- daaa66e1fc
- b0717d6a69
- 4aefe44313
- 4dc6f6fe7b
- d3e0c46ba2
- 0fed17e11a
- a2264b4960
- 2740964106
- 0ca22dd7fe
- a3b6bddac8
- d908ce321a
- c221a44f3d
- 22fb4daaf9
- 1bdc059d76
```diff
@@ -75,7 +75,7 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
 		},
 		signoz.NewEmailingProviderFactories(),
 		signoz.NewCacheProviderFactories(),
-		signoz.NewWebProviderFactories(),
+		signoz.NewWebProviderFactories(config.Global),
 		func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
 			return signoz.NewSQLSchemaProviderFactories(sqlstore)
 		},
@@ -96,7 +96,7 @@ func runServer(ctx context.Context, config signoz.Config, logger *slog.Logger) e
 		},
 		signoz.NewEmailingProviderFactories(),
 		signoz.NewCacheProviderFactories(),
-		signoz.NewWebProviderFactories(),
+		signoz.NewWebProviderFactories(config.Global),
 		func(sqlstore sqlstore.SQLStore) factory.NamedMap[factory.ProviderFactory[sqlschema.SQLSchema, sqlschema.Config]] {
 			existingFactories := signoz.NewSQLSchemaProviderFactories(sqlstore)
 			if err := existingFactories.Add(postgressqlschema.NewFactory(sqlstore)); err != nil {
```
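Both entrypoints now pass `config.Global` into the web provider factories, and the second one extends the stock SQL schema factories with a Postgres variant via `Add`. As a rough, self-contained sketch of that registry pattern (none of SigNoz's actual `factory` API is used here; all names below are hypothetical):

```go
package main

import "fmt"

// namedRegistry is a hypothetical stand-in for factory.NamedMap: providers
// are registered under a unique name and Add fails on duplicates.
type namedRegistry[T any] struct {
	entries map[string]T
}

func newNamedRegistry[T any]() *namedRegistry[T] {
	return &namedRegistry[T]{entries: map[string]T{}}
}

func (r *namedRegistry[T]) Add(name string, value T) error {
	if _, ok := r.entries[name]; ok {
		return fmt.Errorf("factory %q already registered", name)
	}
	r.entries[name] = value
	return nil
}

func main() {
	// Defaults first (e.g. sqlite), then an extra provider (e.g. postgres).
	reg := newNamedRegistry[string]()
	_ = reg.Add("sqlite", "sqlite schema factory")
	if err := reg.Add("postgres", "postgres schema factory"); err != nil {
		panic(err)
	}
	fmt.Println(len(reg.entries), "factories registered")
}
```

The point of `Add` returning an error is that a duplicate provider name is a wiring bug worth failing fast on.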
```diff
@@ -6,6 +6,8 @@
 ##################### Global #####################
 global:
   # the url under which the signoz apiserver is externally reachable.
+  # the path component (e.g. /signoz in https://example.com/signoz) is used
+  # as the base path for all HTTP routes (both API and web frontend).
   external_url: <unset>
   # the url where the SigNoz backend receives telemetry data (traces, metrics, logs) from instrumented applications.
   ingestion_url: <unset>
@@ -50,8 +52,8 @@ pprof:
 web:
   # Whether to enable the web frontend
   enabled: true
-  # The prefix to serve web on
-  prefix: /
+  # The index file to use as the SPA entrypoint.
+  index: index.html
   # The directory containing the static build files.
   directory: /etc/signoz/web
```
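The added comment is the heart of this changeset: the path component of `global.external_url` doubles as the base path for every HTTP route, and the dedicated `web.prefix` option goes away. A minimal sketch of that normalization, assuming only the standard library and mirroring the `ExternalPath()` helper added to `pkg/global` further down in this compare view:

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// Hypothetical value for global.external_url.
	u, err := url.Parse("https://example.com/signoz/")
	if err != nil {
		panic(err)
	}

	// Clean the path and treat a bare "/" as "no prefix", as ExternalPath does.
	p := path.Clean("/" + u.Path)
	if p == "/" {
		p = ""
	}
	fmt.Printf("route prefix: %q\n", p) // route prefix: "/signoz"
}
```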
```diff
@@ -2287,98 +2287,6 @@ components:
         enabled:
           type: boolean
       type: object
-    InframonitoringtypesHostFilter:
-      properties:
-        expression:
-          type: string
-        filterByStatus:
-          $ref: '#/components/schemas/InframonitoringtypesHostStatus'
-      type: object
-    InframonitoringtypesHostRecord:
-      properties:
-        cpu:
-          format: double
-          type: number
-        diskUsage:
-          format: double
-          type: number
-        hostName:
-          type: string
-        load15:
-          format: double
-          type: number
-        memory:
-          format: double
-          type: number
-        meta:
-          additionalProperties: {}
-          nullable: true
-          type: object
-        status:
-          $ref: '#/components/schemas/InframonitoringtypesHostStatus'
-        wait:
-          format: double
-          type: number
-      type: object
-    InframonitoringtypesHostStatus:
-      enum:
-        - active
-        - inactive
-        - ""
-      type: string
-    InframonitoringtypesHostsListRequest:
-      properties:
-        end:
-          format: int64
-          type: integer
-        filter:
-          $ref: '#/components/schemas/InframonitoringtypesHostFilter'
-        groupBy:
-          items:
-            $ref: '#/components/schemas/Querybuildertypesv5GroupByKey'
-          nullable: true
-          type: array
-        limit:
-          type: integer
-        offset:
-          type: integer
-        orderBy:
-          $ref: '#/components/schemas/Querybuildertypesv5OrderBy'
-        start:
-          format: int64
-          type: integer
-      type: object
-    InframonitoringtypesHostsListResponse:
-      properties:
-        endTimeBeforeRetention:
-          type: boolean
-        records:
-          items:
-            $ref: '#/components/schemas/InframonitoringtypesHostRecord'
-          nullable: true
-          type: array
-        requiredMetricsCheck:
-          $ref: '#/components/schemas/InframonitoringtypesRequiredMetricsCheck'
-        total:
-          type: integer
-        type:
-          $ref: '#/components/schemas/InframonitoringtypesResponseType'
-        warning:
-          $ref: '#/components/schemas/Querybuildertypesv5QueryWarnData'
-      type: object
-    InframonitoringtypesRequiredMetricsCheck:
-      properties:
-        missingMetrics:
-          items:
-            type: string
-          nullable: true
-          type: array
-      type: object
-    InframonitoringtypesResponseType:
-      enum:
-        - list
-        - grouped_list
-      type: string
     MetricsexplorertypesInspectMetricsRequest:
       properties:
         end:
```
```diff
@@ -9241,72 +9149,6 @@ paths:
       summary: Health check
       tags:
         - health
-  /api/v2/infra_monitoring/hosts:
-    post:
-      deprecated: false
-      description: 'Returns a paginated list of hosts with key infrastructure metrics:
-        CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute
-        load average. Each host includes its current status (active/inactive based
-        on metrics reported in the last 10 minutes) and metadata attributes (e.g.,
-        os.type). Supports filtering via a filter expression, filtering by host status,
-        custom groupBy to aggregate hosts by any attribute, ordering by any of the
-        five metrics, and pagination via offset/limit. The response type is ''list''
-        for the default host.name grouping or ''grouped_list'' for custom groupBy
-        keys. Also reports missing required metrics and whether the requested time
-        range falls before the data retention boundary.'
-      operationId: HostsList
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/InframonitoringtypesHostsListRequest'
-      responses:
-        "200":
-          content:
-            application/json:
-              schema:
-                properties:
-                  data:
-                    $ref: '#/components/schemas/InframonitoringtypesHostsListResponse'
-                  status:
-                    type: string
-                required:
-                  - status
-                  - data
-                type: object
-          description: OK
-        "400":
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/RenderErrorResponse'
-          description: Bad Request
-        "401":
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/RenderErrorResponse'
-          description: Unauthorized
-        "403":
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/RenderErrorResponse'
-          description: Forbidden
-        "500":
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/RenderErrorResponse'
-          description: Internal Server Error
-      security:
-        - api_key:
-            - VIEWER
-        - tokenizer:
-            - VIEWER
-      summary: List Hosts for Infra Monitoring
-      tags:
-        - infra-monitoring
   /api/v2/livez:
     get:
       deprecated: false
```
````diff
@@ -7,12 +7,12 @@ This guide explains how to add new data sources to the SigNoz onboarding flow. T
 The configuration is located at:
 
 ```
-frontend/src/container/OnboardingV2Container/onboarding-configs/onboarding-config-with-links.json
+frontend/src/container/OnboardingV2Container/onboarding-configs/onboarding-config-with-links.ts
 ```
 
-## JSON Structure Overview
+## Structure Overview
 
-The configuration file is a JSON array containing data source objects. Each object represents a selectable option in the onboarding flow.
+The configuration file exports a TypeScript array (`onboardingConfigWithLinks`) containing data source objects. Each object represents a selectable option in the onboarding flow. SVG logos are imported as ES modules at the top of the file.
 
 ## Data Source Object Keys
 
@@ -24,7 +24,7 @@ The configuration file is a JSON array containing data source objects. Each obje
 | `label` | `string` | Display name shown to users (e.g., `"AWS EC2"`) |
 | `tags` | `string[]` | Array of category tags for grouping (e.g., `["AWS"]`, `["database"]`) |
 | `module` | `string` | Destination module after onboarding completion |
-| `imgUrl` | `string` | Path to the logo/icon **(SVG required)** (e.g., `"/Logos/ec2.svg"`) |
+| `imgUrl` | `string` | Imported SVG URL **(SVG required)** (e.g., `import ec2Url from '@/assets/Logos/ec2.svg'`, then use `ec2Url`) |
 
 ### Optional Keys
 
@@ -57,36 +57,34 @@ The `module` key determines where users are redirected after completing onboardi
 
 The `question` object enables multi-step selection flows:
 
-```json
-{
-  "question": {
-    "desc": "What would you like to monitor?",
-    "type": "select",
-    "helpText": "Choose the telemetry type you want to collect.",
-    "helpLink": "/docs/azure-monitoring/overview/",
-    "helpLinkText": "Read the guide →",
-    "options": [
-      {
-        "key": "logging",
-        "label": "Logs",
-        "imgUrl": "/Logos/azure-vm.svg",
-        "link": "/docs/azure-monitoring/app-service/logging/"
-      },
-      {
-        "key": "metrics",
-        "label": "Metrics",
-        "imgUrl": "/Logos/azure-vm.svg",
-        "link": "/docs/azure-monitoring/app-service/metrics/"
-      },
-      {
-        "key": "tracing",
-        "label": "Traces",
-        "imgUrl": "/Logos/azure-vm.svg",
-        "link": "/docs/azure-monitoring/app-service/tracing/"
-      }
-    ]
-  }
-}
+```ts
+question: {
+  desc: 'What would you like to monitor?',
+  type: 'select',
+  helpText: 'Choose the telemetry type you want to collect.',
+  helpLink: '/docs/azure-monitoring/overview/',
+  helpLinkText: 'Read the guide →',
+  options: [
+    {
+      key: 'logging',
+      label: 'Logs',
+      imgUrl: azureVmUrl,
+      link: '/docs/azure-monitoring/app-service/logging/',
+    },
+    {
+      key: 'metrics',
+      label: 'Metrics',
+      imgUrl: azureVmUrl,
+      link: '/docs/azure-monitoring/app-service/metrics/',
+    },
+    {
+      key: 'tracing',
+      label: 'Traces',
+      imgUrl: azureVmUrl,
+      link: '/docs/azure-monitoring/app-service/tracing/',
+    },
+  ],
+},
 ```
 
 ### Question Keys
@@ -106,152 +104,161 @@ Options can be simple (direct link) or nested (with another question):
 
 ### Simple Option (Direct Link)
 
-```json
+```ts
 {
-  "key": "aws-ec2-logs",
-  "label": "Logs",
-  "imgUrl": "/Logos/ec2.svg",
-  "link": "/docs/userguide/collect_logs_from_file/"
-}
+  key: 'aws-ec2-logs',
+  label: 'Logs',
+  imgUrl: ec2Url,
+  link: '/docs/userguide/collect_logs_from_file/',
+},
 ```
 
 ### Option with Internal Redirect
 
-```json
+```ts
 {
-  "key": "aws-ec2-metrics-one-click",
-  "label": "One Click AWS",
-  "imgUrl": "/Logos/ec2.svg",
-  "link": "/integrations?integration=aws-integration&service=ec2",
-  "internalRedirect": true
-}
+  key: 'aws-ec2-metrics-one-click',
+  label: 'One Click AWS',
+  imgUrl: ec2Url,
+  link: '/integrations?integration=aws-integration&service=ec2',
+  internalRedirect: true,
+},
 ```
 
 > **Important**: Set `internalRedirect: true` only for internal app routes (like `/integrations?...`). Docs links should NOT have this flag.
 
 ### Nested Option (Multi-step Flow)
 
-```json
+```ts
 {
-  "key": "aws-ec2-metrics",
-  "label": "Metrics",
-  "imgUrl": "/Logos/ec2.svg",
-  "question": {
-    "desc": "How would you like to set up monitoring?",
-    "helpText": "Choose your setup method.",
-    "options": [...]
-  }
-}
+  key: 'aws-ec2-metrics',
+  label: 'Metrics',
+  imgUrl: ec2Url,
+  question: {
+    desc: 'How would you like to set up monitoring?',
+    helpText: 'Choose your setup method.',
+    options: [...],
+  },
+},
 ```
 
 ## Examples
 
 ### Simple Data Source (Direct Link)
 
-```json
+```ts
+import elbUrl from '@/assets/Logos/elb.svg';
+
+// inside the onboardingConfigWithLinks array:
 {
-  "dataSource": "aws-elb",
-  "label": "AWS ELB",
-  "tags": ["AWS"],
-  "module": "logs",
-  "relatedSearchKeywords": [
-    "aws",
-    "aws elb",
-    "elb logs",
-    "elastic load balancer"
+  dataSource: 'aws-elb',
+  label: 'AWS ELB',
+  tags: ['AWS'],
+  module: 'logs',
+  relatedSearchKeywords: [
+    'aws',
+    'aws elb',
+    'elb logs',
+    'elastic load balancer',
   ],
-  "imgUrl": "/Logos/elb.svg",
-  "link": "/docs/aws-monitoring/elb/"
-}
+  imgUrl: elbUrl,
+  link: '/docs/aws-monitoring/elb/',
+},
 ```
 
 ### Data Source with Single Question Level
 
-```json
+```ts
+import azureVmUrl from '@/assets/Logos/azure-vm.svg';
+
+// inside the onboardingConfigWithLinks array:
 {
-  "dataSource": "app-service",
-  "label": "App Service",
-  "imgUrl": "/Logos/azure-vm.svg",
-  "tags": ["Azure"],
-  "module": "apm",
-  "relatedSearchKeywords": ["azure", "app service"],
-  "question": {
-    "desc": "What telemetry data do you want to visualise?",
-    "type": "select",
-    "options": [
+  dataSource: 'app-service',
+  label: 'App Service',
+  imgUrl: azureVmUrl,
+  tags: ['Azure'],
+  module: 'apm',
+  relatedSearchKeywords: ['azure', 'app service'],
+  question: {
+    desc: 'What telemetry data do you want to visualise?',
+    type: 'select',
+    options: [
       {
-        "key": "logging",
-        "label": "Logs",
-        "imgUrl": "/Logos/azure-vm.svg",
-        "link": "/docs/azure-monitoring/app-service/logging/"
+        key: 'logging',
+        label: 'Logs',
+        imgUrl: azureVmUrl,
+        link: '/docs/azure-monitoring/app-service/logging/',
       },
       {
-        "key": "metrics",
-        "label": "Metrics",
-        "imgUrl": "/Logos/azure-vm.svg",
-        "link": "/docs/azure-monitoring/app-service/metrics/"
+        key: 'metrics',
+        label: 'Metrics',
+        imgUrl: azureVmUrl,
+        link: '/docs/azure-monitoring/app-service/metrics/',
       },
       {
-        "key": "tracing",
-        "label": "Traces",
-        "imgUrl": "/Logos/azure-vm.svg",
-        "link": "/docs/azure-monitoring/app-service/tracing/"
-      }
-    ]
-  }
-}
+        key: 'tracing',
+        label: 'Traces',
+        imgUrl: azureVmUrl,
+        link: '/docs/azure-monitoring/app-service/tracing/',
+      },
+    ],
+  },
+},
 ```
 
 ### Data Source with Nested Questions (2-3 Levels)
 
-```json
+```ts
+import ec2Url from '@/assets/Logos/ec2.svg';
+
+// inside the onboardingConfigWithLinks array:
 {
-  "dataSource": "aws-ec2",
-  "label": "AWS EC2",
-  "tags": ["AWS"],
-  "module": "logs",
-  "relatedSearchKeywords": ["aws", "aws ec2", "ec2 logs", "ec2 metrics"],
-  "imgUrl": "/Logos/ec2.svg",
-  "question": {
-    "desc": "What would you like to monitor for AWS EC2?",
-    "type": "select",
-    "helpText": "Choose the type of telemetry data you want to collect.",
-    "options": [
+  dataSource: 'aws-ec2',
+  label: 'AWS EC2',
+  tags: ['AWS'],
+  module: 'logs',
+  relatedSearchKeywords: ['aws', 'aws ec2', 'ec2 logs', 'ec2 metrics'],
+  imgUrl: ec2Url,
+  question: {
+    desc: 'What would you like to monitor for AWS EC2?',
+    type: 'select',
+    helpText: 'Choose the type of telemetry data you want to collect.',
+    options: [
       {
-        "key": "aws-ec2-logs",
-        "label": "Logs",
-        "imgUrl": "/Logos/ec2.svg",
-        "link": "/docs/userguide/collect_logs_from_file/"
+        key: 'aws-ec2-logs',
+        label: 'Logs',
+        imgUrl: ec2Url,
+        link: '/docs/userguide/collect_logs_from_file/',
      },
      {
-        "key": "aws-ec2-metrics",
-        "label": "Metrics",
-        "imgUrl": "/Logos/ec2.svg",
-        "question": {
-          "desc": "How would you like to set up EC2 Metrics monitoring?",
-          "helpText": "One Click uses AWS CloudWatch integration. Manual setup uses OpenTelemetry.",
-          "helpLink": "/docs/aws-monitoring/one-click-vs-manual/",
-          "helpLinkText": "Read the comparison guide →",
-          "options": [
+        key: 'aws-ec2-metrics',
+        label: 'Metrics',
+        imgUrl: ec2Url,
+        question: {
+          desc: 'How would you like to set up EC2 Metrics monitoring?',
+          helpText: 'One Click uses AWS CloudWatch integration. Manual setup uses OpenTelemetry.',
+          helpLink: '/docs/aws-monitoring/one-click-vs-manual/',
+          helpLinkText: 'Read the comparison guide →',
+          options: [
            {
-              "key": "aws-ec2-metrics-one-click",
-              "label": "One Click AWS",
-              "imgUrl": "/Logos/ec2.svg",
-              "link": "/integrations?integration=aws-integration&service=ec2",
-              "internalRedirect": true
+              key: 'aws-ec2-metrics-one-click',
+              label: 'One Click AWS',
+              imgUrl: ec2Url,
+              link: '/integrations?integration=aws-integration&service=ec2',
+              internalRedirect: true,
            },
            {
-              "key": "aws-ec2-metrics-manual",
-              "label": "Manual Setup",
-              "imgUrl": "/Logos/ec2.svg",
-              "link": "/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/"
-            }
-          ]
-        }
-      }
-    ]
-  }
-}
+              key: 'aws-ec2-metrics-manual',
+              label: 'Manual Setup',
+              imgUrl: ec2Url,
+              link: '/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/',
+            },
+          ],
+        },
+      },
+    ],
+  },
+},
 ```
 
 ## Best Practices
@@ -270,11 +277,16 @@ Options can be simple (direct link) or nested (with another question):
 
 ### 3. Logos
 
-- Place logo files in `public/Logos/`
+- Place logo files in `src/assets/Logos/`
 - Use SVG format
-- Reference as `"/Logos/your-logo.svg"`
+- Import the SVG at the top of the file and reference the imported variable:
+  ```ts
+  import myServiceUrl from '@/assets/Logos/my-service.svg';
+  // then in the config object:
+  imgUrl: myServiceUrl,
+  ```
 - **Fetching Icons**: New icons can be easily fetched from [OpenBrand](https://openbrand.sh/). Use the pattern `https://openbrand.sh/?url=<TARGET_URL>`, where `<TARGET_URL>` is the URL-encoded link to the service's website. For example, to get Render's logo, use [https://openbrand.sh/?url=https%3A%2F%2Frender.com](https://openbrand.sh/?url=https%3A%2F%2Frender.com).
-- **Optimize new SVGs**: Run any newly downloaded SVGs through an optimizer like [SVGOMG (svgo)](https://svgomg.net/) or use `npx svgo public/Logos/your-logo.svg` to minimise their size before committing.
+- **Optimize new SVGs**: Run any newly downloaded SVGs through an optimizer like [SVGOMG (svgo)](https://svgomg.net/) or use `npx svgo src/assets/Logos/your-logo.svg` to minimise their size before committing.
 
 ### 4. Links
 
@@ -290,8 +302,8 @@ Options can be simple (direct link) or nested (with another question):
 
 ## Adding a New Data Source
 
-1. Add your data source object to the JSON array
-2. Ensure the logo exists in `public/Logos/`
+1. Add the logo SVG to `src/assets/Logos/` and add a top-level import in the config file (e.g., `import myServiceUrl from '@/assets/Logos/my-service.svg'`)
+2. Add your data source object to the `onboardingConfigWithLinks` array, referencing the imported variable for `imgUrl`
 3. Test the flow locally with `yarn dev`
 4. Validation:
    - Navigate to the [onboarding page](http://localhost:3301/get-started-with-signoz-cloud) on your local machine
````
```diff
@@ -262,6 +262,20 @@ func (s *Server) createPublicServer(apiHandler *api.APIHandler, web web.Web) (*h
 		return nil, err
 	}
 
+	routePrefix := s.config.Global.ExternalPath()
+	if routePrefix != "" {
+		prefixed := http.StripPrefix(routePrefix, handler)
+		handler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			switch req.URL.Path {
+			case "/api/v1/health", "/api/v2/healthz", "/api/v2/readyz", "/api/v2/livez":
+				r.ServeHTTP(w, req)
+				return
+			}
+
+			prefixed.ServeHTTP(w, req)
+		})
+	}
+
 	return &http.Server{
 		Handler: handler,
 	}, nil
```
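The wrapper above serves health probes unprefixed while stripping the configured prefix from everything else. A hedged `httptest` sketch of the same pattern (standalone, not SigNoz's actual server wiring) shows why the bypass matters for load balancers and Kubernetes probes, which always hit the bare paths:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/api/v2/healthz", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "ok")
	})
	mux.HandleFunc("/api/v1/services", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, "services")
	})

	routePrefix := "/signoz" // would come from config.Global.ExternalPath()
	prefixed := http.StripPrefix(routePrefix, mux)
	handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		// Health probes bypass the prefix and are served as-is.
		if req.URL.Path == "/api/v2/healthz" {
			mux.ServeHTTP(w, req)
			return
		}
		prefixed.ServeHTTP(w, req)
	})

	srv := httptest.NewServer(handler)
	defer srv.Close()

	for _, p := range []string{"/api/v2/healthz", "/signoz/api/v1/services"} {
		resp, err := http.Get(srv.URL + p)
		if err != nil {
			panic(err)
		}
		fmt.Println(p, "->", resp.StatusCode)
	}
	// /api/v2/healthz -> 200
	// /signoz/api/v1/services -> 200
}
```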
frontend/.gitignore (vendored, 5 changes)

```diff
@@ -28,4 +28,7 @@ e2e/test-plan/saved-views/
 e2e/test-plan/service-map/
 e2e/test-plan/services/
 e2e/test-plan/traces/
 e2e/test-plan/user-preferences/
+
+# Generated by `vite build` — do not commit
+index.html.gotmpl
```
```diff
@@ -2,6 +2,7 @@
 <html lang="en">
 	<head>
 		<meta charset="utf-8" />
+		<base href="[[.BasePath]]" />
 		<meta
 			http-equiv="Cache-Control"
 			content="no-cache, no-store, must-revalidate, max-age: 0"
@@ -59,7 +60,7 @@
 		<meta data-react-helmet="true" name="docusaurus_locale" content="en" />
 		<meta data-react-helmet="true" name="docusaurus_tag" content="default" />
 		<meta name="robots" content="noindex" />
-		<link data-react-helmet="true" rel="shortcut icon" href="/favicon.ico" />
+		<link data-react-helmet="true" rel="shortcut icon" href="favicon.ico" />
 	</head>
 	<body data-theme="default">
 		<noscript>You need to enable JavaScript to run this app.</noscript>
@@ -113,7 +114,7 @@
 			})(document, 'script');
 		}
 	</script>
-	<link rel="stylesheet" href="/css/uPlot.min.css" />
+	<link rel="stylesheet" href="css/uPlot.min.css" />
 	<script type="module" src="./src/index.tsx"></script>
 </body>
 </html>
```
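The `[[.BasePath]]` placeholder and the `.gotmpl` extension suggest Go's `html/template` with custom delimiters, which keeps Go templating out of the way of any `{{ }}` syntax in frontend tooling. The actual rendering code in the web package is not part of this compare view; this is a hypothetical sketch of how the injection could work, with `BasePath` supplied by `ExternalPathTrailing()`:

```go
package main

import (
	"html/template"
	"os"
)

func main() {
	// "[[" / "]]" delimiters avoid clashing with frontend {{ }} syntax.
	tmpl := template.Must(
		template.New("index").Delims("[[", "]]").Parse(
			`<base href="[[.BasePath]]" />`,
		),
	)
	// BasePath would come from config.Global.ExternalPathTrailing(),
	// which always ends in "/" (e.g. "/" or "/signoz/").
	if err := tmpl.Execute(os.Stdout, map[string]string{"BasePath": "/signoz/"}); err != nil {
		panic(err)
	}
	// Output: <base href="/signoz/" />
}
```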
Deleted file (auto-generated Orval hook for the removed hosts endpoint, 104 lines):

```ts
/**
 * ! Do not edit manually
 * * The file has been auto-generated using Orval for SigNoz
 * * regenerate with 'yarn generate:api'
 * SigNoz
 */
import type {
	MutationFunction,
	UseMutationOptions,
	UseMutationResult,
} from 'react-query';
import { useMutation } from 'react-query';

import type { BodyType, ErrorType } from '../../../generatedAPIInstance';
import { GeneratedAPIInstance } from '../../../generatedAPIInstance';
import type {
	HostsList200,
	InframonitoringtypesHostsListRequestDTO,
	RenderErrorResponseDTO,
} from '../sigNoz.schemas';

/**
 * Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary.
 * @summary List Hosts for Infra Monitoring
 */
export const hostsList = (
	inframonitoringtypesHostsListRequestDTO: BodyType<InframonitoringtypesHostsListRequestDTO>,
	signal?: AbortSignal,
) => {
	return GeneratedAPIInstance<HostsList200>({
		url: `/api/v2/infra_monitoring/hosts`,
		method: 'POST',
		headers: { 'Content-Type': 'application/json' },
		data: inframonitoringtypesHostsListRequestDTO,
		signal,
	});
};

export const getHostsListMutationOptions = <
	TError = ErrorType<RenderErrorResponseDTO>,
	TContext = unknown
>(options?: {
	mutation?: UseMutationOptions<
		Awaited<ReturnType<typeof hostsList>>,
		TError,
		{ data: BodyType<InframonitoringtypesHostsListRequestDTO> },
		TContext
	>;
}): UseMutationOptions<
	Awaited<ReturnType<typeof hostsList>>,
	TError,
	{ data: BodyType<InframonitoringtypesHostsListRequestDTO> },
	TContext
> => {
	const mutationKey = ['hostsList'];
	const { mutation: mutationOptions } = options
		? options.mutation &&
		  'mutationKey' in options.mutation &&
		  options.mutation.mutationKey
			? options
			: { ...options, mutation: { ...options.mutation, mutationKey } }
		: { mutation: { mutationKey } };

	const mutationFn: MutationFunction<
		Awaited<ReturnType<typeof hostsList>>,
		{ data: BodyType<InframonitoringtypesHostsListRequestDTO> }
	> = (props) => {
		const { data } = props ?? {};

		return hostsList(data);
	};

	return { mutationFn, ...mutationOptions };
};

export type HostsListMutationResult = NonNullable<
	Awaited<ReturnType<typeof hostsList>>
>;
export type HostsListMutationBody = BodyType<InframonitoringtypesHostsListRequestDTO>;
export type HostsListMutationError = ErrorType<RenderErrorResponseDTO>;

/**
 * @summary List Hosts for Infra Monitoring
 */
export const useHostsList = <
	TError = ErrorType<RenderErrorResponseDTO>,
	TContext = unknown
>(options?: {
	mutation?: UseMutationOptions<
		Awaited<ReturnType<typeof hostsList>>,
		TError,
		{ data: BodyType<InframonitoringtypesHostsListRequestDTO> },
		TContext
	>;
}): UseMutationResult<
	Awaited<ReturnType<typeof hostsList>>,
	TError,
	{ data: BodyType<InframonitoringtypesHostsListRequestDTO> },
	TContext
> => {
	const mutationOptions = getHostsListMutationOptions(options);

	return useMutation(mutationOptions);
};
```
```diff
@@ -3051,123 +3051,6 @@ export interface GlobaltypesTokenizerConfigDTO {
 	enabled?: boolean;
 }
 
-export interface InframonitoringtypesHostFilterDTO {
-	/**
-	 * @type string
-	 */
-	expression?: string;
-	filterByStatus?: InframonitoringtypesHostStatusDTO;
-}
-
-/**
- * @nullable
- */
-export type InframonitoringtypesHostRecordDTOMeta = {
-	[key: string]: unknown;
-} | null;
-
-export interface InframonitoringtypesHostRecordDTO {
-	/**
-	 * @type number
-	 * @format double
-	 */
-	cpu?: number;
-	/**
-	 * @type number
-	 * @format double
-	 */
-	diskUsage?: number;
-	/**
-	 * @type string
-	 */
-	hostName?: string;
-	/**
-	 * @type number
-	 * @format double
-	 */
-	load15?: number;
-	/**
-	 * @type number
-	 * @format double
-	 */
-	memory?: number;
-	/**
-	 * @type object
-	 * @nullable true
-	 */
-	meta?: InframonitoringtypesHostRecordDTOMeta;
-	status?: InframonitoringtypesHostStatusDTO;
-	/**
-	 * @type number
-	 * @format double
-	 */
-	wait?: number;
-}
-
-export enum InframonitoringtypesHostStatusDTO {
-	active = 'active',
-	inactive = 'inactive',
-	'' = '',
-}
-export interface InframonitoringtypesHostsListRequestDTO {
-	/**
-	 * @type integer
-	 * @format int64
-	 */
-	end?: number;
-	filter?: InframonitoringtypesHostFilterDTO;
-	/**
-	 * @type array
-	 * @nullable true
-	 */
-	groupBy?: Querybuildertypesv5GroupByKeyDTO[] | null;
-	/**
-	 * @type integer
-	 */
-	limit?: number;
-	/**
-	 * @type integer
-	 */
-	offset?: number;
-	orderBy?: Querybuildertypesv5OrderByDTO;
-	/**
-	 * @type integer
-	 * @format int64
-	 */
-	start?: number;
-}
-
-export interface InframonitoringtypesHostsListResponseDTO {
-	/**
-	 * @type boolean
-	 */
-	endTimeBeforeRetention?: boolean;
-	/**
-	 * @type array
-	 * @nullable true
-	 */
-	records?: InframonitoringtypesHostRecordDTO[] | null;
-	requiredMetricsCheck?: InframonitoringtypesRequiredMetricsCheckDTO;
-	/**
-	 * @type integer
-	 */
-	total?: number;
-	type?: InframonitoringtypesResponseTypeDTO;
-	warning?: Querybuildertypesv5QueryWarnDataDTO;
-}
-
-export interface InframonitoringtypesRequiredMetricsCheckDTO {
-	/**
-	 * @type array
-	 * @nullable true
-	 */
-	missingMetrics?: string[] | null;
-}
-
-export enum InframonitoringtypesResponseTypeDTO {
-	list = 'list',
-	grouped_list = 'grouped_list',
-}
 export interface MetricsexplorertypesInspectMetricsRequestDTO {
 	/**
 	 * @type integer
@@ -6188,14 +6071,6 @@ export type Healthz503 = {
 	status: string;
 };
 
-export type HostsList200 = {
-	data: InframonitoringtypesHostsListResponseDTO;
-	/**
-	 * @type string
-	 */
-	status: string;
-};
-
 export type Livez200 = {
 	data: FactoryResponseDTO;
 	/**
```
```diff
@@ -1,5 +1,6 @@
 import {
 	interceptorRejected,
+	interceptorsRequestBasePath,
 	interceptorsRequestResponse,
 	interceptorsResponse,
 } from 'api';
@@ -17,6 +18,7 @@ export const GeneratedAPIInstance = <T>(
 	return generatedAPIAxiosInstance({ ...config }).then(({ data }) => data);
 };
 
+generatedAPIAxiosInstance.interceptors.request.use(interceptorsRequestBasePath);
 generatedAPIAxiosInstance.interceptors.request.use(interceptorsRequestResponse);
 generatedAPIAxiosInstance.interceptors.response.use(
 	interceptorsResponse,
```
```diff
@@ -11,6 +11,7 @@ import axios, {
 import { ENVIRONMENT } from 'constants/env';
 import { Events } from 'constants/events';
 import { LOCALSTORAGE } from 'constants/localStorage';
+import { getBasePath } from 'utils/getBasePath';
 import { eventEmitter } from 'utils/getEventEmitter';
 
 import apiV1, { apiAlertManager, apiV2, apiV3, apiV4, apiV5 } from './apiV1';
@@ -67,6 +68,28 @@ export const interceptorsRequestResponse = (
 	return value;
 };
 
+// Prepends the runtime base path to outgoing requests so API calls work under
+// a URL prefix (e.g. /signoz/api/v1/…). No-op for root deployments and dev
+// (dev baseURL is a full http:// URL, not an absolute path).
+export const interceptorsRequestBasePath = (
+	value: InternalAxiosRequestConfig,
+): InternalAxiosRequestConfig => {
+	const basePath = getBasePath();
+	if (basePath === '/') {
+		return value;
+	}
+
+	if (value.baseURL?.startsWith('/')) {
+		// Named instances: baseURL='/api/v1/' → '/signoz/api/v1/'
+		value.baseURL = basePath + value.baseURL.slice(1);
+	} else if (!value.baseURL && value.url?.startsWith('/')) {
+		// Generated instance: baseURL is '' in prod, path is in url
+		value.url = basePath + value.url.slice(1);
+	}
+
+	return value;
+};
+
 export const interceptorRejected = async (
 	value: AxiosResponse<any>,
 ): Promise<AxiosResponse<any>> => {
@@ -133,6 +156,7 @@ const instance = axios.create({
 });
 
 instance.interceptors.request.use(interceptorsRequestResponse);
+instance.interceptors.request.use(interceptorsRequestBasePath);
 instance.interceptors.response.use(interceptorsResponse, interceptorRejected);
 
 export const AxiosAlertManagerInstance = axios.create({
@@ -147,6 +171,7 @@ ApiV2Instance.interceptors.response.use(
 	interceptorRejected,
 );
 ApiV2Instance.interceptors.request.use(interceptorsRequestResponse);
+ApiV2Instance.interceptors.request.use(interceptorsRequestBasePath);
 
 // axios V3
 export const ApiV3Instance = axios.create({
@@ -158,6 +183,7 @@ ApiV3Instance.interceptors.response.use(
 	interceptorRejected,
 );
 ApiV3Instance.interceptors.request.use(interceptorsRequestResponse);
+ApiV3Instance.interceptors.request.use(interceptorsRequestBasePath);
 //
 
 // axios V4
@@ -170,6 +196,7 @@ ApiV4Instance.interceptors.response.use(
 	interceptorRejected,
 );
 ApiV4Instance.interceptors.request.use(interceptorsRequestResponse);
+ApiV4Instance.interceptors.request.use(interceptorsRequestBasePath);
 //
 
 // axios V5
@@ -182,6 +209,7 @@ ApiV5Instance.interceptors.response.use(
 	interceptorRejected,
 );
 ApiV5Instance.interceptors.request.use(interceptorsRequestResponse);
+ApiV5Instance.interceptors.request.use(interceptorsRequestBasePath);
 //
 
 // axios Base
@@ -194,6 +222,7 @@ LogEventAxiosInstance.interceptors.response.use(
 	interceptorRejectedBase,
 );
 LogEventAxiosInstance.interceptors.request.use(interceptorsRequestResponse);
+LogEventAxiosInstance.interceptors.request.use(interceptorsRequestBasePath);
 //
 
 AxiosAlertManagerInstance.interceptors.response.use(
@@ -201,6 +230,7 @@ AxiosAlertManagerInstance.interceptors.response.use(
 	interceptorRejected,
 );
 AxiosAlertManagerInstance.interceptors.request.use(interceptorsRequestResponse);
+AxiosAlertManagerInstance.interceptors.request.use(interceptorsRequestBasePath);
 
 export { apiV1 };
 export default instance;
```
```diff
@@ -1,3 +1,4 @@
 import { createBrowserHistory } from 'history';
+import { getBasePath } from 'utils/getBasePath';
 
-export default createBrowserHistory();
+export default createBrowserHistory({ basename: getBasePath() });
```
```diff
@@ -2,6 +2,7 @@ import { useCallback } from 'react';
 import { Button } from 'antd';
 import ROUTES from 'constants/routes';
 import { useGetTenantLicense } from 'hooks/useGetTenantLicense';
+import history from 'lib/history';
 import { Home, LifeBuoy } from 'lucide-react';
 import { handleContactSupport } from 'pages/Integrations/utils';
 
@@ -11,8 +12,9 @@ import './ErrorBoundaryFallback.styles.scss';
 
 function ErrorBoundaryFallback(): JSX.Element {
 	const handleReload = (): void => {
-		// Go to home page
-		window.location.href = ROUTES.HOME;
+		// Use history.push so the navigation stays within the base path prefix
+		// (window.location.href would strip any /signoz/ prefix).
+		history.push(ROUTES.HOME);
 	};
 
 	const { isCloudUser: isCloudUserVal } = useGetTenantLicense();
```
frontend/src/utils/__tests__/getBasePath.test.ts (new file, 50 lines)

```ts
import { getBasePath } from 'utils/getBasePath';

/**
 * Contract tests for getBasePath().
 *
 * These lock down the exact DOM-reading contract so that any future change to
 * the utility (or to how index.html injects the <base> tag) surfaces
 * immediately as a test failure.
 */
describe('getBasePath', () => {
	afterEach(() => {
		// Remove any <base> elements added during the test.
		document.head.querySelectorAll('base').forEach((el) => el.remove());
	});

	it('returns the href from the <base> tag when present', () => {
		const base = document.createElement('base');
		base.setAttribute('href', '/signoz/');
		document.head.appendChild(base);

		expect(getBasePath()).toBe('/signoz/');
	});

	it('returns "/" when no <base> tag exists in the document', () => {
		expect(getBasePath()).toBe('/');
	});

	it('returns "/" when the <base> tag has no href attribute', () => {
		const base = document.createElement('base');
		document.head.appendChild(base);

		expect(getBasePath()).toBe('/');
	});

	it('returns the href unchanged when it already has a trailing slash', () => {
		const base = document.createElement('base');
		base.setAttribute('href', '/my/nested/path/');
		document.head.appendChild(base);

		expect(getBasePath()).toBe('/my/nested/path/');
	});

	it('appends a trailing slash when the href is missing one', () => {
		const base = document.createElement('base');
		base.setAttribute('href', '/signoz');
		document.head.appendChild(base);

		expect(getBasePath()).toBe('/signoz/');
	});
});
```
frontend/src/utils/getBasePath.ts (new file, 17 lines)

```ts
/**
 * Returns the base path for this SigNoz deployment by reading the
 * `<base href>` element injected into index.html by the Go backend at
 * serve time.
 *
 * Always returns a string ending with `/` (e.g. `/`, `/signoz/`).
 * Falls back to `/` when no `<base>` element is present so the app
 * behaves correctly in local Vite dev and unit-test environments.
 *
 * @internal — consume through `src/lib/history` and the axios interceptor;
 * do not read `<base>` directly anywhere else in the codebase.
 */
export function getBasePath(): string {
	const href = document.querySelector('base')?.getAttribute('href') ?? '/';
	// Trailing slash is required for relative asset resolution and API prefixing.
	return href.endsWith('/') ? href : `${href}/`;
}
```
```diff
@@ -10,6 +10,18 @@ import { createHtmlPlugin } from 'vite-plugin-html';
 import { ViteImageOptimizer } from 'vite-plugin-image-optimizer';
 import tsconfigPaths from 'vite-tsconfig-paths';
 
+// In dev the Go backend is not involved, so replace the [[.BasePath]] placeholder
+// with "/" so relative assets resolve correctly from the Vite dev server.
+function devBasePathPlugin(): Plugin {
+	return {
+		name: 'dev-base-path',
+		apply: 'serve',
+		transformIndexHtml(html): string {
+			return html.replace('[[.BasePath]]', '/');
+		},
+	};
+}
+
 function rawMarkdownPlugin(): Plugin {
 	return {
 		name: 'raw-markdown',
@@ -32,6 +44,7 @@ export default defineConfig(
 		const plugins = [
 			tsconfigPaths(),
 			rawMarkdownPlugin(),
+			devBasePathPlugin(),
 			react(),
 			createHtmlPlugin({
 				inject: {
@@ -124,6 +137,7 @@ export default defineConfig(
 			'process.env.TUNNEL_DOMAIN': JSON.stringify(env.VITE_TUNNEL_DOMAIN),
 			'process.env.DOCS_BASE_URL': JSON.stringify(env.VITE_DOCS_BASE_URL),
 		},
+		base: './',
 		build: {
 			sourcemap: true,
 			outDir: 'build',
```
Deleted file (Go route registration for the removed hosts endpoint, 33 lines):

```go
package signozapiserver

import (
	"net/http"

	"github.com/SigNoz/signoz/pkg/http/handler"
	"github.com/SigNoz/signoz/pkg/types"
	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
	"github.com/gorilla/mux"
)

func (provider *provider) addInfraMonitoringRoutes(router *mux.Router) error {
	if err := router.Handle("/api/v2/infra_monitoring/hosts", handler.New(
		provider.authZ.ViewAccess(provider.infraMonitoringHandler.HostsList),
		handler.OpenAPIDef{
			ID:                  "HostsList",
			Tags:                []string{"infra-monitoring"},
			Summary:             "List Hosts for Infra Monitoring",
			Description:         "Returns a paginated list of hosts with key infrastructure metrics: CPU usage (%), memory usage (%), I/O wait (%), disk usage (%), and 15-minute load average. Each host includes its current status (active/inactive based on metrics reported in the last 10 minutes) and metadata attributes (e.g., os.type). Supports filtering via a filter expression, filtering by host status, custom groupBy to aggregate hosts by any attribute, ordering by any of the five metrics, and pagination via offset/limit. The response type is 'list' for the default host.name grouping or 'grouped_list' for custom groupBy keys. Also reports missing required metrics and whether the requested time range falls before the data retention boundary.",
			Request:             new(inframonitoringtypes.HostsListRequest),
			RequestContentType:  "application/json",
			Response:            new(inframonitoringtypes.HostsListResponse),
			ResponseContentType: "application/json",
			SuccessStatusCode:   http.StatusOK,
			ErrorStatusCodes:    []int{http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError},
			Deprecated:          false,
			SecuritySchemes:     newSecuritySchemes(types.RoleViewer),
		})).Methods(http.MethodPost).GetError(); err != nil {
		return err
	}

	return nil
}
```
```diff
@@ -17,7 +17,6 @@
 	"github.com/SigNoz/signoz/pkg/modules/dashboard"
 	"github.com/SigNoz/signoz/pkg/modules/fields"
 	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
-	"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
 	"github.com/SigNoz/signoz/pkg/modules/organization"
 	"github.com/SigNoz/signoz/pkg/modules/preference"
 	"github.com/SigNoz/signoz/pkg/modules/promote"
@@ -49,7 +48,6 @@ type provider struct {
 	dashboardModule        dashboard.Module
 	dashboardHandler       dashboard.Handler
 	metricsExplorerHandler metricsexplorer.Handler
-	infraMonitoringHandler inframonitoring.Handler
 	gatewayHandler         gateway.Handler
 	fieldsHandler          fields.Handler
 	authzHandler           authz.Handler
@@ -77,7 +75,6 @@ func NewFactory(
 	dashboardModule dashboard.Module,
 	dashboardHandler dashboard.Handler,
 	metricsExplorerHandler metricsexplorer.Handler,
-	infraMonitoringHandler inframonitoring.Handler,
 	gatewayHandler gateway.Handler,
 	fieldsHandler fields.Handler,
 	authzHandler authz.Handler,
@@ -108,7 +105,6 @@ func NewFactory(
 		dashboardModule,
 		dashboardHandler,
 		metricsExplorerHandler,
-		infraMonitoringHandler,
 		gatewayHandler,
 		fieldsHandler,
 		authzHandler,
@@ -141,7 +137,6 @@ func newProvider(
 	dashboardModule dashboard.Module,
 	dashboardHandler dashboard.Handler,
 	metricsExplorerHandler metricsexplorer.Handler,
-	infraMonitoringHandler inframonitoring.Handler,
 	gatewayHandler gateway.Handler,
 	fieldsHandler fields.Handler,
 	authzHandler authz.Handler,
@@ -172,7 +167,6 @@ func newProvider(
 		dashboardModule:        dashboardModule,
 		dashboardHandler:       dashboardHandler,
 		metricsExplorerHandler: metricsExplorerHandler,
-		infraMonitoringHandler: infraMonitoringHandler,
 		gatewayHandler:         gatewayHandler,
 		fieldsHandler:          fieldsHandler,
 		authzHandler:           authzHandler,
@@ -240,10 +234,6 @@ func (provider *provider) AddToRouter(router *mux.Router) error {
 		return err
 	}
 
-	if err := provider.addInfraMonitoringRoutes(router); err != nil {
-		return err
-	}
-
 	if err := provider.addGatewayRoutes(router); err != nil {
 		return err
 	}
```
```diff
@@ -2,6 +2,8 @@ package global
 
 import (
 	"net/url"
+	"path"
+	"strings"
 
 	"github.com/SigNoz/signoz/pkg/errors"
 
@@ -37,5 +39,34 @@ func newConfig() factory.Config {
 }
 
 func (c Config) Validate() error {
+	if c.ExternalURL != nil {
+		if c.ExternalURL.Path != "" && c.ExternalURL.Path != "/" {
+			if !strings.HasPrefix(c.ExternalURL.Path, "/") {
+				return errors.NewInvalidInputf(ErrCodeInvalidGlobalConfig, "global::external_url path must start with '/', got %q", c.ExternalURL.Path)
+			}
+		}
+	}
+
 	return nil
 }
+
+func (c Config) ExternalPath() string {
+	if c.ExternalURL == nil || c.ExternalURL.Path == "" || c.ExternalURL.Path == "/" {
+		return ""
+	}
+
+	p := path.Clean("/" + c.ExternalURL.Path)
+	if p == "/" {
+		return ""
+	}
+
+	return p
+}
+
+func (c Config) ExternalPathTrailing() string {
+	if p := c.ExternalPath(); p != "" {
+		return p + "/"
+	}
+
+	return "/"
+}
```
pkg/global/config_test.go (new file, 139 lines)

```go
package global

import (
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestExternalPath(t *testing.T) {
	testCases := []struct {
		name     string
		config   Config
		expected string
	}{
		{
			name:     "NilURL",
			config:   Config{ExternalURL: nil},
			expected: "",
		},
		{
			name:     "EmptyPath",
			config:   Config{ExternalURL: &url.URL{Scheme: "https", Host: "example.com", Path: ""}},
			expected: "",
		},
		{
			name:     "RootPath",
			config:   Config{ExternalURL: &url.URL{Scheme: "https", Host: "example.com", Path: "/"}},
			expected: "",
		},
		{
			name:     "SingleSegment",
			config:   Config{ExternalURL: &url.URL{Scheme: "https", Host: "example.com", Path: "/signoz"}},
			expected: "/signoz",
		},
		{
			name:     "TrailingSlash",
			config:   Config{ExternalURL: &url.URL{Scheme: "https", Host: "example.com", Path: "/signoz/"}},
			expected: "/signoz",
		},
		{
			name:     "MultiSegment",
			config:   Config{ExternalURL: &url.URL{Scheme: "https", Host: "example.com", Path: "/a/b/c"}},
			expected: "/a/b/c",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, tc.config.ExternalPath())
		})
	}
}

func TestExternalPathTrailing(t *testing.T) {
	testCases := []struct {
		name     string
		config   Config
		expected string
	}{
		{
			name:     "NilURL",
			config:   Config{ExternalURL: nil},
			expected: "/",
		},
		{
			name:     "EmptyPath",
			config:   Config{ExternalURL: &url.URL{Path: ""}},
			expected: "/",
		},
		{
			name:     "RootPath",
			config:   Config{ExternalURL: &url.URL{Path: "/"}},
			expected: "/",
		},
		{
			name:     "SingleSegment",
			config:   Config{ExternalURL: &url.URL{Path: "/signoz"}},
			expected: "/signoz/",
		},
		{
			name:     "MultiSegment",
			config:   Config{ExternalURL: &url.URL{Path: "/a/b/c"}},
			expected: "/a/b/c/",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.expected, tc.config.ExternalPathTrailing())
		})
	}
}

func TestValidate(t *testing.T) {
	testCases := []struct {
		name   string
		config Config
		fail   bool
	}{
		{
			name:   "NilURL",
			config: Config{ExternalURL: nil},
			fail:   false,
		},
		{
			name:   "EmptyPath",
			config: Config{ExternalURL: &url.URL{Path: ""}},
			fail:   false,
		},
		{
			name:   "RootPath",
			config: Config{ExternalURL: &url.URL{Path: "/"}},
			fail:   false,
		},
		{
			name:   "ValidPath",
			config: Config{ExternalURL: &url.URL{Path: "/signoz"}},
			fail:   false,
		},
		{
			name:   "NoLeadingSlash",
			config: Config{ExternalURL: &url.URL{Path: "signoz"}},
			fail:   true,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.config.Validate()
			if tc.fail {
				assert.Error(t, err)
				return
			}

			assert.NoError(t, err)
		})
	}
}
```
Deleted file (config for the removed inframonitoring module, 33 lines):

```go
package inframonitoring

import (
	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/factory"
)

type Config struct {
	TelemetryStore TelemetryStoreConfig `mapstructure:"telemetrystore"`
}

type TelemetryStoreConfig struct {
	Threads int `mapstructure:"threads"`
}

func NewConfigFactory() factory.ConfigFactory {
	return factory.NewConfigFactory(factory.MustNewName("inframonitoring"), newConfig)
}

func newConfig() factory.Config {
	return Config{
		TelemetryStore: TelemetryStoreConfig{
			Threads: 8,
		},
	}
}

func (c Config) Validate() error {
	if c.TelemetryStore.Threads <= 0 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "inframonitoring.telemetrystore.threads must be positive, got %d", c.TelemetryStore.Threads)
	}
	return nil
}
```
Deleted file (ClickHouse query helpers for the removed inframonitoring module, 277 lines; the mirror's listing cuts off partway through):

```go
package implinframonitoring

import (
	"context"
	"fmt"
	"strings"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/querybuilder"
	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/huandu/go-sqlbuilder"
)

func (m *module) buildFilterClause(ctx context.Context, filter *qbtypes.Filter, startMillis, endMillis int64) (*sqlbuilder.WhereClause, error) {
	expression := ""
	if filter != nil {
		expression = strings.TrimSpace(filter.Expression)
	}
	if expression == "" {
		return sqlbuilder.NewWhereClause(), nil
	}

	whereClauseSelectors := querybuilder.QueryStringToKeysSelectors(expression)
	for idx := range whereClauseSelectors {
		whereClauseSelectors[idx].Signal = telemetrytypes.SignalMetrics
		whereClauseSelectors[idx].SelectorMatchType = telemetrytypes.FieldSelectorMatchTypeExact
	}

	keys, _, err := m.telemetryMetadataStore.GetKeysMulti(ctx, whereClauseSelectors)
	if err != nil {
		return nil, err
	}

	opts := querybuilder.FilterExprVisitorOpts{
		Context:          ctx,
		Logger:           m.logger,
		FieldMapper:      m.fieldMapper,
		ConditionBuilder: m.condBuilder,
		FullTextColumn:   &telemetrytypes.TelemetryFieldKey{Name: "metric_name", FieldContext: telemetrytypes.FieldContextMetric},
		FieldKeys:        keys,
		StartNs:          querybuilder.ToNanoSecs(uint64(startMillis)),
		EndNs:            querybuilder.ToNanoSecs(uint64(endMillis)),
	}

	whereClause, err := querybuilder.PrepareWhereClause(expression, opts)
	if err != nil {
		return nil, err
	}

	if whereClause == nil || whereClause.WhereClause == nil {
		return sqlbuilder.NewWhereClause(), nil
	}

	return whereClause.WhereClause, nil
}

// NOTE: this method is not specific to infra monitoring — it queries attributes_metadata generically.
// Consider moving to telemetryMetaStore when a second use case emerges.
//
// getMetricsExistenceAndEarliestTime checks which of the given metric names have been
// reported. It returns a list of missing metrics (those not found or with zero count)
// and the earliest first-reported timestamp across all present metrics.
// When all metrics are missing, minFirstReportedUnixMilli is 0.
// TODO(nikhilmantri0902, srikanthccv): This method was designed this way because querier errors if any of the metrics
// in the querier list was never sent, the QueryRange call throws not found error. Modify this method, if QueryRange
// behaviour changes towards this.
func (m *module) getMetricsExistenceAndEarliestTime(ctx context.Context, metricNames []string) ([]string, uint64, error) {
	if len(metricNames) == 0 {
		return nil, 0, nil
	}

	sb := sqlbuilder.NewSelectBuilder()
	sb.Select("metric_name", "count(*) AS cnt", "min(first_reported_unix_milli) AS min_first_reported")
	sb.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, telemetrymetrics.AttributesMetadataTableName))
	sb.Where(sb.In("metric_name", sqlbuilder.List(metricNames)))
	sb.GroupBy("metric_name")

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := m.telemetryStore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, 0, err
	}
	defer rows.Close()

	type metricInfo struct {
		count            uint64
		minFirstReported uint64
	}
	found := make(map[string]metricInfo, len(metricNames))

	for rows.Next() {
		var name string
		var cnt, minFR uint64
		if err := rows.Scan(&name, &cnt, &minFR); err != nil {
			return nil, 0, err
		}
		found[name] = metricInfo{count: cnt, minFirstReported: minFR}
	}
	if err := rows.Err(); err != nil {
		return nil, 0, err
	}

	var missingMetrics []string
	var globalMinFirstReported uint64
	for _, name := range metricNames {
		info, ok := found[name]
		if !ok || info.count == 0 {
			missingMetrics = append(missingMetrics, name)
			continue
		}
		if globalMinFirstReported == 0 || info.minFirstReported < globalMinFirstReported {
			globalMinFirstReported = info.minFirstReported
		}
	}

	return missingMetrics, globalMinFirstReported, nil
}

// getMetadata fetches the latest values of additionalCols for each unique combination of groupBy keys,
// within the given time range and metric names. It uses argMax(tuple(...), unix_milli) to ensure
// we always pick attribute values from the latest timestamp for each group.
// The returned map has a composite key of groupBy column values joined by "\x00" (null byte),
// mapping to a flat map of attr_name -> attr_value (includes both groupBy and additional cols).
func (m *module) getMetadata(
	ctx context.Context,
	metricNames []string,
	groupBy []qbtypes.GroupByKey,
	additionalCols []string,
	filter *qbtypes.Filter,
	startMs, endMs int64,
) (map[string]map[string]string, error) {
	if len(metricNames) == 0 {
		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "metricNames must not be empty")
	}
	if len(groupBy) == 0 {
		return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "groupBy must not be empty")
	}

	// Pick the optimal timeseries table based on time range; also get adjusted start.
	adjustedStart, adjustedEnd, distributedTableName, _ := telemetrymetrics.WhichTSTableToUse(
		uint64(startMs), uint64(endMs), nil,
	)

	// Build a fingerprint subquery against the samples table using the original
	// (non-adjusted) time range. The time_series tables are ReplacingMergeTrees
	// with bucketed granularity, so WhichTSTableToUse widens the window — this
	// subquery restricts to fingerprints actually active in the requested range.
	samplesTableName := telemetrymetrics.WhichSamplesTableToUse(
		uint64(startMs), uint64(endMs),
		metrictypes.UnspecifiedType,
		metrictypes.TimeAggregationUnspecified,
		nil,
	)
	localSamplesTable := strings.TrimPrefix(samplesTableName, "distributed_")
	fpSB := sqlbuilder.NewSelectBuilder()
	fpSB.Select("DISTINCT fingerprint")
	fpSB.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, localSamplesTable))
	fpSB.Where(
		fpSB.In("metric_name", sqlbuilder.List(metricNames)),
		fpSB.GE("unix_milli", startMs),
		fpSB.L("unix_milli", endMs),
	)

	// Flatten groupBy keys to string names for SQL expressions and result scanning.
	groupByCols := make([]string, len(groupBy))
	for i, key := range groupBy {
		groupByCols[i] = key.Name
	}
	allCols := append(groupByCols, additionalCols...)

	// --- Build inner query ---
	innerSB := sqlbuilder.NewSelectBuilder()

	// Inner SELECT columns: JSONExtractString for each groupBy col + argMax(tuple(...)) for additional cols
	innerSelectCols := make([]string, 0, len(groupByCols)+1)
	for _, col := range groupByCols {
		innerSelectCols = append(innerSelectCols,
			fmt.Sprintf("JSONExtractString(labels, %s) AS %s", innerSB.Var(col), quoteIdentifier(col)),
		)
	}

	// Build the argMax(tuple(...), unix_milli) expression for all additional cols
	if len(additionalCols) > 0 {
		tupleArgs := make([]string, 0, len(additionalCols))
		for _, col := range additionalCols {
			tupleArgs = append(tupleArgs, fmt.Sprintf("JSONExtractString(labels, %s)", innerSB.Var(col)))
		}
		innerSelectCols = append(innerSelectCols,
			fmt.Sprintf("argMax(tuple(%s), unix_milli) AS latest_attrs", strings.Join(tupleArgs, ", ")),
		)
	}

	innerSB.Select(innerSelectCols...)
	innerSB.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, distributedTableName))
	innerSB.Where(
		innerSB.In("metric_name", sqlbuilder.List(metricNames)),
		innerSB.GE("unix_milli", adjustedStart),
		innerSB.L("unix_milli", adjustedEnd),
		fmt.Sprintf("fingerprint IN (%s)", innerSB.Var(fpSB)),
	)

	// Apply optional filter expression
	if filter != nil && strings.TrimSpace(filter.Expression) != "" {
		filterClause, err := m.buildFilterClause(ctx, filter, startMs, endMs)
		if err != nil {
			return nil, err
		}
		if filterClause != nil {
			innerSB.AddWhereClause(filterClause)
		}
	}

	groupByAliases := make([]string, 0, len(groupByCols))
	for _, col := range groupByCols {
		groupByAliases = append(groupByAliases, quoteIdentifier(col))
	}
	innerSB.GroupBy(groupByAliases...)

	innerQuery, innerArgs := innerSB.BuildWithFlavor(sqlbuilder.ClickHouse)

	// --- Build outer query ---
	// Outer SELECT columns: groupBy cols directly + tupleElement(latest_attrs, N) for each additionalCol
	outerSelectCols := make([]string, 0, len(allCols))
	for _, col := range groupByCols {
		outerSelectCols = append(outerSelectCols, quoteIdentifier(col))
	}
	for i, col := range additionalCols {
		outerSelectCols = append(outerSelectCols,
			fmt.Sprintf("tupleElement(latest_attrs, %d) AS %s", i+1, quoteIdentifier(col)),
		)
	}

	outerSB := sqlbuilder.NewSelectBuilder()
	outerSB.Select(outerSelectCols...)
	outerSB.From(fmt.Sprintf("(%s)", innerQuery))

	outerQuery, _ := outerSB.BuildWithFlavor(sqlbuilder.ClickHouse)
	// All ? params are in innerArgs; outer query introduces none of its own.
```
|
||||
|
||||
rows, err := m.telemetryStore.ClickhouseDB().Query(ctx, outerQuery, innerArgs...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
result := make(map[string]map[string]string)
|
||||
|
||||
for rows.Next() {
|
||||
row := make([]string, len(allCols))
|
||||
scanPtrs := make([]any, len(row))
|
||||
for i := range row {
|
||||
scanPtrs[i] = &row[i]
|
||||
}
|
||||
|
||||
if err := rows.Scan(scanPtrs...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
compositeKey := compositeKeyFromList(row[:len(groupByCols)])
|
||||
|
||||
attrMap := make(map[string]string, len(allCols))
|
||||
for i, col := range allCols {
|
||||
attrMap[col] = row[i]
|
||||
}
|
||||
result[compositeKey] = attrMap
|
||||
}
|
||||
|
||||
if err := rows.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
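
// For orientation, a hypothetical sketch of the query shape the builders above
// produce (illustrative only; the real table names and bind placeholders come
// from telemetrymetrics at runtime):
//
//	SELECT `host.name`, tupleElement(latest_attrs, 1) AS `os.type`
//	FROM (
//	    SELECT JSONExtractString(labels, ?) AS `host.name`,
//	           argMax(tuple(JSONExtractString(labels, ?)), unix_milli) AS latest_attrs
//	    FROM signoz_metrics.<time_series_table>
//	    WHERE metric_name IN (...) AND unix_milli >= ? AND unix_milli < ?
//	      AND fingerprint IN (SELECT DISTINCT fingerprint FROM <samples_table> ...)
//	    GROUP BY `host.name`
//	)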
@@ -1,48 +0,0 @@
package implinframonitoring

import (
	"net/http"

	"github.com/SigNoz/signoz/pkg/errors"
	"github.com/SigNoz/signoz/pkg/http/binding"
	"github.com/SigNoz/signoz/pkg/http/render"
	"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
	"github.com/SigNoz/signoz/pkg/types/authtypes"
	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type handler struct {
	module inframonitoring.Module
}

// NewHandler returns an inframonitoring.Handler implementation.
func NewHandler(m inframonitoring.Module) inframonitoring.Handler {
	return &handler{
		module: m,
	}
}

func (h *handler) HostsList(rw http.ResponseWriter, req *http.Request) {
	claims, err := authtypes.ClaimsFromContext(req.Context())
	if err != nil {
		render.Error(rw, err)
		return
	}

	orgID := valuer.MustNewUUID(claims.OrgID)

	var parsedReq inframonitoringtypes.HostsListRequest
	if err := binding.JSON.BindBody(req.Body, &parsedReq); err != nil {
		render.Error(rw, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "failed to parse request body"))
		return
	}

	result, err := h.module.HostsList(req.Context(), orgID, &parsedReq)
	if err != nil {
		render.Error(rw, err)
		return
	}

	render.Success(rw, http.StatusOK, result)
}
@@ -1,292 +0,0 @@
package implinframonitoring

import (
	"fmt"
	"sort"
	"strings"

	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
)

// quoteIdentifier wraps s in backticks for use as a ClickHouse identifier,
// escaping any embedded backticks by doubling them.
func quoteIdentifier(s string) string {
	return fmt.Sprintf("`%s`", strings.ReplaceAll(s, "`", "``"))
}

type rankedGroup struct {
	labels map[string]string
	value  float64
}

func isKeyInGroupByAttrs(groupByAttrs []qbtypes.GroupByKey, key string) bool {
	for _, groupBy := range groupByAttrs {
		if groupBy.Name == key {
			return true
		}
	}
	return false
}

func mergeFilterExpressions(queryFilterExpr, reqFilterExpr string) string {
	queryFilterExpr = strings.TrimSpace(queryFilterExpr)
	reqFilterExpr = strings.TrimSpace(reqFilterExpr)
	if queryFilterExpr == "" {
		return reqFilterExpr
	}
	if reqFilterExpr == "" {
		return queryFilterExpr
	}
	return fmt.Sprintf("(%s) AND (%s)", queryFilterExpr, reqFilterExpr)
}

// compositeKeyFromList builds a composite key by joining the given parts
// with a null byte separator. This is the canonical way to construct
// composite keys for group identification across the infra monitoring module.
func compositeKeyFromList(parts []string) string {
	return strings.Join(parts, "\x00")
}

// compositeKeyFromLabels builds a composite key from a label map by extracting
// the value for each groupBy key in order and joining them via compositeKeyFromList.
func compositeKeyFromLabels(labels map[string]string, groupBy []qbtypes.GroupByKey) string {
	parts := make([]string, len(groupBy))
	for i, key := range groupBy {
		parts[i] = labels[key.Name]
	}
	return compositeKeyFromList(parts)
}
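
// Example (illustrative): with groupBy = ["host.name", "os.type"] and
// labels = {"host.name": "web-1", "os.type": "linux"}, the composite key is
// "web-1\x00linux"; a label missing from the map contributes an empty segment.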

// parseAndSortGroups extracts group label maps from a ScalarData response and
// sorts them by the ranking query's aggregation value.
func parseAndSortGroups(
	resp *qbtypes.QueryRangeResponse,
	rankingQueryName string,
	groupBy []qbtypes.GroupByKey,
	direction qbtypes.OrderDirection,
) []rankedGroup {
	if resp == nil || len(resp.Data.Results) == 0 {
		return nil
	}

	// Find the ScalarData that contains the ranking column.
	var sd *qbtypes.ScalarData
	for _, r := range resp.Data.Results {
		candidate, ok := r.(*qbtypes.ScalarData)
		if !ok || candidate == nil {
			continue
		}
		for _, col := range candidate.Columns {
			if col.Type == qbtypes.ColumnTypeAggregation && col.QueryName == rankingQueryName {
				sd = candidate
				break
			}
		}
		if sd != nil {
			break
		}
	}
	if sd == nil || len(sd.Data) == 0 {
		return nil
	}

	groupColIndices := make(map[string]int)
	rankingColIdx := -1
	for i, col := range sd.Columns {
		if col.Type == qbtypes.ColumnTypeGroup {
			groupColIndices[col.Name] = i
		}
		if col.Type == qbtypes.ColumnTypeAggregation && col.QueryName == rankingQueryName {
			rankingColIdx = i
		}
	}
	if rankingColIdx == -1 {
		return nil
	}

	groups := make([]rankedGroup, 0, len(sd.Data))
	for _, row := range sd.Data {
		labels := make(map[string]string, len(groupBy))
		for _, key := range groupBy {
			if idx, ok := groupColIndices[key.Name]; ok && idx < len(row) {
				labels[key.Name] = fmt.Sprintf("%v", row[idx])
			}
		}
		var value float64
		if rankingColIdx < len(row) {
			if v, ok := row[rankingColIdx].(float64); ok {
				value = v
			}
		}
		groups = append(groups, rankedGroup{labels: labels, value: value})
	}

	sort.Slice(groups, func(i, j int) bool {
		if direction == qbtypes.OrderDirectionAsc {
			return groups[i].value < groups[j].value
		}
		return groups[i].value > groups[j].value
	})

	return groups
}

// paginateWithBackfill returns the page of groups for [offset, offset+limit).
// The virtual sorted list is: metric-ranked groups first, then metadata-only
// groups (those in metadataMap but not in metric results) sorted alphabetically.
func paginateWithBackfill(
	metricGroups []rankedGroup,
	metadataMap map[string]map[string]string,
	groupBy []qbtypes.GroupByKey,
	offset, limit int,
) []map[string]string {
	metricKeySet := make(map[string]bool, len(metricGroups))
	for _, g := range metricGroups {
		metricKeySet[compositeKeyFromLabels(g.labels, groupBy)] = true
	}

	metadataOnlyKeys := make([]string, 0)
	for compositeKey := range metadataMap {
		if !metricKeySet[compositeKey] {
			metadataOnlyKeys = append(metadataOnlyKeys, compositeKey)
		}
	}
	sort.Strings(metadataOnlyKeys)

	totalMetric := len(metricGroups)
	totalAll := totalMetric + len(metadataOnlyKeys)

	end := offset + limit
	if end > totalAll {
		end = totalAll
	}
	if offset >= totalAll {
		return nil
	}

	pageGroups := make([]map[string]string, 0, end-offset)
	for i := offset; i < end; i++ {
		if i < totalMetric {
			pageGroups = append(pageGroups, metricGroups[i].labels)
		} else {
			compositeKey := metadataOnlyKeys[i-totalMetric]
			attrs := metadataMap[compositeKey]
			labels := make(map[string]string, len(groupBy))
			for _, key := range groupBy {
				labels[key.Name] = attrs[key.Name]
			}
			pageGroups = append(pageGroups, labels)
		}
	}
	return pageGroups
}
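
// Worked example (illustrative): with 3 metric-ranked groups and 4
// metadata-only groups, the virtual list has 7 entries; offset=2, limit=3
// returns the third metric-ranked group followed by the first two
// metadata-only groups (in ascending composite-key order).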

// buildFullQueryRequest creates a QueryRangeRequest for all metrics,
// restricted to the given page of groups via an IN filter.
// Accepts primitive fields so it can be reused across different v2 APIs
// (hosts, pods, etc.).
func buildFullQueryRequest(
	start int64,
	end int64,
	filterExpr string,
	groupBy []qbtypes.GroupByKey,
	pageGroups []map[string]string,
	tableListQuery *qbtypes.QueryRangeRequest,
) *qbtypes.QueryRangeRequest {
	groupValues := make(map[string][]string)
	for _, labels := range pageGroups {
		for k, v := range labels {
			groupValues[k] = append(groupValues[k], v)
		}
	}

	inClauses := make([]string, 0, len(groupValues))
	for key, values := range groupValues {
		quoted := make([]string, len(values))
		for i, v := range values {
			quoted[i] = fmt.Sprintf("'%s'", v)
		}
		inClauses = append(inClauses, fmt.Sprintf("%s IN (%s)", key, strings.Join(quoted, ", ")))
	}
	inFilterExpr := strings.Join(inClauses, " AND ")

	fullReq := &qbtypes.QueryRangeRequest{
		Start:       uint64(start),
		End:         uint64(end),
		RequestType: qbtypes.RequestTypeScalar,
		CompositeQuery: qbtypes.CompositeQuery{
			Queries: make([]qbtypes.QueryEnvelope, 0, len(tableListQuery.CompositeQuery.Queries)),
		},
	}

	for _, envelope := range tableListQuery.CompositeQuery.Queries {
		copied := envelope
		if copied.Type == qbtypes.QueryTypeBuilder {
			existingExpr := ""
			if f := copied.GetFilter(); f != nil {
				existingExpr = f.Expression
			}
			merged := mergeFilterExpressions(existingExpr, filterExpr)
			merged = mergeFilterExpressions(merged, inFilterExpr)
			copied.SetFilter(&qbtypes.Filter{Expression: merged})
			copied.SetGroupBy(groupBy)
		}
		fullReq.CompositeQuery.Queries = append(fullReq.CompositeQuery.Queries, copied)
	}

	return fullReq
}
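
// Example (illustrative): for a page containing hosts "web-1" and "web-2"
// grouped by host.name, the added restriction is
//
//	host.name IN ('web-1', 'web-2')
//
// which is then ANDed with the request filter and each query's own filter.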

// parseFullQueryResponse extracts per-group metric values from the full
// composite query response. Returns compositeKey -> (queryName -> value).
// Each enabled query/formula produces its own ScalarData entry in Results,
// so we iterate over all of them and merge metrics per composite key.
func parseFullQueryResponse(
	resp *qbtypes.QueryRangeResponse,
	groupBy []qbtypes.GroupByKey,
) map[string]map[string]float64 {
	result := make(map[string]map[string]float64)
	if resp == nil || len(resp.Data.Results) == 0 {
		return result
	}

	for _, r := range resp.Data.Results {
		sd, ok := r.(*qbtypes.ScalarData)
		if !ok || sd == nil {
			continue
		}

		groupColIndices := make(map[string]int)
		aggCols := make(map[int]string) // col index -> query name
		for i, col := range sd.Columns {
			if col.Type == qbtypes.ColumnTypeGroup {
				groupColIndices[col.Name] = i
			}
			if col.Type == qbtypes.ColumnTypeAggregation {
				aggCols[i] = col.QueryName
			}
		}

		for _, row := range sd.Data {
			labels := make(map[string]string, len(groupBy))
			for _, key := range groupBy {
				if idx, ok := groupColIndices[key.Name]; ok && idx < len(row) {
					labels[key.Name] = fmt.Sprintf("%v", row[idx])
				}
			}
			compositeKey := compositeKeyFromLabels(labels, groupBy)

			if result[compositeKey] == nil {
				result[compositeKey] = make(map[string]float64)
			}
			for idx, queryName := range aggCols {
				if idx < len(row) {
					if v, ok := row[idx].(float64); ok {
						result[compositeKey][queryName] = v
					}
				}
			}
		}
	}
	return result
}
@@ -1,283 +0,0 @@
package implinframonitoring

import (
	"testing"

	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
)

func groupByKey(name string) qbtypes.GroupByKey {
	return qbtypes.GroupByKey{
		TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{Name: name},
	}
}

func TestIsKeyInGroupByAttrs(t *testing.T) {
	tests := []struct {
		name          string
		groupByAttrs  []qbtypes.GroupByKey
		key           string
		expectedFound bool
	}{
		{
			name:          "key present in single-element list",
			groupByAttrs:  []qbtypes.GroupByKey{groupByKey("host.name")},
			key:           "host.name",
			expectedFound: true,
		},
		{
			name: "key present in multi-element list",
			groupByAttrs: []qbtypes.GroupByKey{
				groupByKey("host.name"),
				groupByKey("os.type"),
				groupByKey("k8s.cluster.name"),
			},
			key:           "os.type",
			expectedFound: true,
		},
		{
			name: "key at last position",
			groupByAttrs: []qbtypes.GroupByKey{
				groupByKey("host.name"),
				groupByKey("os.type"),
			},
			key:           "os.type",
			expectedFound: true,
		},
		{
			name:          "key not in list",
			groupByAttrs:  []qbtypes.GroupByKey{groupByKey("host.name")},
			key:           "os.type",
			expectedFound: false,
		},
		{
			name:          "empty group by list",
			groupByAttrs:  []qbtypes.GroupByKey{},
			key:           "host.name",
			expectedFound: false,
		},
		{
			name:          "nil group by list",
			groupByAttrs:  nil,
			key:           "host.name",
			expectedFound: false,
		},
		{
			name:          "empty key string",
			groupByAttrs:  []qbtypes.GroupByKey{groupByKey("host.name")},
			key:           "",
			expectedFound: false,
		},
		{
			name:          "empty key matches empty-named group by key",
			groupByAttrs:  []qbtypes.GroupByKey{groupByKey("")},
			key:           "",
			expectedFound: true,
		},
		{
			name: "partial match does not count",
			groupByAttrs: []qbtypes.GroupByKey{
				groupByKey("host"),
			},
			key:           "host.name",
			expectedFound: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := isKeyInGroupByAttrs(tt.groupByAttrs, tt.key)
			if got != tt.expectedFound {
				t.Errorf("isKeyInGroupByAttrs(%v, %q) = %v, want %v",
					tt.groupByAttrs, tt.key, got, tt.expectedFound)
			}
		})
	}
}

func TestMergeFilterExpressions(t *testing.T) {
	tests := []struct {
		name            string
		queryFilterExpr string
		reqFilterExpr   string
		expected        string
	}{
		{
			name:            "both non-empty",
			queryFilterExpr: "cpu > 50",
			reqFilterExpr:   "host.name = 'web-1'",
			expected:        "(cpu > 50) AND (host.name = 'web-1')",
		},
		{
			name:            "query empty, req non-empty",
			queryFilterExpr: "",
			reqFilterExpr:   "host.name = 'web-1'",
			expected:        "host.name = 'web-1'",
		},
		{
			name:            "query non-empty, req empty",
			queryFilterExpr: "cpu > 50",
			reqFilterExpr:   "",
			expected:        "cpu > 50",
		},
		{
			name:            "both empty",
			queryFilterExpr: "",
			reqFilterExpr:   "",
			expected:        "",
		},
		{
			name:            "whitespace-only query treated as empty",
			queryFilterExpr: " ",
			reqFilterExpr:   "host.name = 'web-1'",
			expected:        "host.name = 'web-1'",
		},
		{
			name:            "whitespace-only req treated as empty",
			queryFilterExpr: "cpu > 50",
			reqFilterExpr:   " ",
			expected:        "cpu > 50",
		},
		{
			name:            "both whitespace-only",
			queryFilterExpr: " ",
			reqFilterExpr:   " ",
			expected:        "",
		},
		{
			name:            "leading/trailing whitespace trimmed before merge",
			queryFilterExpr: " cpu > 50 ",
			reqFilterExpr:   " mem < 80 ",
			expected:        "(cpu > 50) AND (mem < 80)",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := mergeFilterExpressions(tt.queryFilterExpr, tt.reqFilterExpr)
			if got != tt.expected {
				t.Errorf("mergeFilterExpressions(%q, %q) = %q, want %q",
					tt.queryFilterExpr, tt.reqFilterExpr, got, tt.expected)
			}
		})
	}
}

func TestCompositeKeyFromList(t *testing.T) {
	tests := []struct {
		name     string
		parts    []string
		expected string
	}{
		{
			name:     "single part",
			parts:    []string{"web-1"},
			expected: "web-1",
		},
		{
			name:     "multiple parts joined with null separator",
			parts:    []string{"web-1", "linux", "us-east"},
			expected: "web-1\x00linux\x00us-east",
		},
		{
			name:     "empty slice returns empty string",
			parts:    []string{},
			expected: "",
		},
		{
			name:     "nil slice returns empty string",
			parts:    nil,
			expected: "",
		},
		{
			name:     "parts with empty strings",
			parts:    []string{"web-1", "", "us-east"},
			expected: "web-1\x00\x00us-east",
		},
		{
			name:     "all empty strings",
			parts:    []string{"", ""},
			expected: "\x00",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := compositeKeyFromList(tt.parts)
			if got != tt.expected {
				t.Errorf("compositeKeyFromList(%v) = %q, want %q",
					tt.parts, got, tt.expected)
			}
		})
	}
}

func TestCompositeKeyFromLabels(t *testing.T) {
	tests := []struct {
		name     string
		labels   map[string]string
		groupBy  []qbtypes.GroupByKey
		expected string
	}{
		{
			name:     "single group-by key",
			labels:   map[string]string{"host.name": "web-1"},
			groupBy:  []qbtypes.GroupByKey{groupByKey("host.name")},
			expected: "web-1",
		},
		{
			name: "multiple group-by keys joined with null separator",
			labels: map[string]string{
				"host.name": "web-1",
				"os.type":   "linux",
			},
			groupBy:  []qbtypes.GroupByKey{groupByKey("host.name"), groupByKey("os.type")},
			expected: "web-1\x00linux",
		},
		{
			name:     "missing label yields empty segment",
			labels:   map[string]string{"host.name": "web-1"},
			groupBy:  []qbtypes.GroupByKey{groupByKey("host.name"), groupByKey("os.type")},
			expected: "web-1\x00",
		},
		{
			name:     "empty labels map",
			labels:   map[string]string{},
			groupBy:  []qbtypes.GroupByKey{groupByKey("host.name")},
			expected: "",
		},
		{
			name:     "empty group-by slice",
			labels:   map[string]string{"host.name": "web-1"},
			groupBy:  []qbtypes.GroupByKey{},
			expected: "",
		},
		{
			name:     "nil labels map",
			labels:   nil,
			groupBy:  []qbtypes.GroupByKey{groupByKey("host.name")},
			expected: "",
		},
		{
			name: "order matches group-by order, not map iteration order",
			labels: map[string]string{
				"z": "last",
				"a": "first",
				"m": "middle",
			},
			groupBy:  []qbtypes.GroupByKey{groupByKey("a"), groupByKey("m"), groupByKey("z")},
			expected: "first\x00middle\x00last",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := compositeKeyFromLabels(tt.labels, tt.groupBy)
			if got != tt.expected {
				t.Errorf("compositeKeyFromLabels(%v, %v) = %q, want %q",
					tt.labels, tt.groupBy, got, tt.expected)
			}
		})
	}
}
@@ -1,494 +0,0 @@
package implinframonitoring

import (
	"context"
	"fmt"
	"slices"
	"strings"
	"time"

	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
	"github.com/SigNoz/signoz/pkg/types/metrictypes"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
	"github.com/huandu/go-sqlbuilder"
)

const (
	hostNameAttrKey = "host.name"
)

// Helper group-by key used across all queries.
var hostNameGroupByKey = qbtypes.GroupByKey{
	TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
		Name:          hostNameAttrKey,
		FieldContext:  telemetrytypes.FieldContextResource,
		FieldDataType: telemetrytypes.FieldDataTypeString,
	},
}

var hostsTableMetricNamesList = []string{
	"system.cpu.time",
	"system.memory.usage",
	"system.cpu.load_average.15m",
	"system.filesystem.usage",
}

var hostAttrKeysForMetadata = []string{
	"os.type",
}

// orderByToHostsQueryNames maps the orderBy column to the query/formula names
// from HostsTableListQuery used for ranking host groups.
var orderByToHostsQueryNames = map[string][]string{
	inframonitoringtypes.HostsOrderByCPU:       {"A", "B", "F1"},
	inframonitoringtypes.HostsOrderByMemory:    {"C", "D", "F2"},
	inframonitoringtypes.HostsOrderByWait:      {"E", "F", "F3"},
	inframonitoringtypes.HostsOrderByDiskUsage: {"H", "I", "F4"},
	inframonitoringtypes.HostsOrderByLoad15:    {"G"},
}

func (m *module) newHostsTableListQuery() *qbtypes.QueryRangeRequest {
	return &qbtypes.QueryRangeRequest{
		RequestType: qbtypes.RequestTypeScalar,
		CompositeQuery: qbtypes.CompositeQuery{
			Queries: []qbtypes.QueryEnvelope{
				// Query A: CPU usage (non-idle states)
				{
					Type: qbtypes.QueryTypeBuilder,
					Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
						Name:   "A",
						Signal: telemetrytypes.SignalMetrics,
						Aggregations: []qbtypes.MetricAggregation{
							{
								MetricName:       "system.cpu.time",
								Temporality:      metrictypes.Cumulative,
								TimeAggregation:  metrictypes.TimeAggregationRate,
								SpaceAggregation: metrictypes.SpaceAggregationSum,
								ReduceTo:         qbtypes.ReduceToAvg,
							},
						},
						Filter: &qbtypes.Filter{
							Expression: "state != 'idle'",
						},
						GroupBy:  []qbtypes.GroupByKey{hostNameGroupByKey},
						Disabled: true,
					},
				},
				// Query B: CPU usage (all states)
				{
					Type: qbtypes.QueryTypeBuilder,
					Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
						Name:   "B",
						Signal: telemetrytypes.SignalMetrics,
						Aggregations: []qbtypes.MetricAggregation{
							{
								MetricName:       "system.cpu.time",
								Temporality:      metrictypes.Cumulative,
								TimeAggregation:  metrictypes.TimeAggregationRate,
								SpaceAggregation: metrictypes.SpaceAggregationSum,
								ReduceTo:         qbtypes.ReduceToAvg,
							},
						},
						GroupBy:  []qbtypes.GroupByKey{hostNameGroupByKey},
						Disabled: true,
					},
				},
				// Formula F1: CPU Usage (%)
				{
					Type: qbtypes.QueryTypeFormula,
					Spec: qbtypes.QueryBuilderFormula{
						Name:       "F1",
						Expression: "A/B",
						Legend:     "CPU Usage (%)",
						Disabled:   false,
					},
				},
				// Query C: Memory usage (state = used)
				{
					Type: qbtypes.QueryTypeBuilder,
					Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
						Name:   "C",
						Signal: telemetrytypes.SignalMetrics,
						Aggregations: []qbtypes.MetricAggregation{
							{
								MetricName:       "system.memory.usage",
								Temporality:      metrictypes.Cumulative,
								TimeAggregation:  metrictypes.TimeAggregationAvg,
								SpaceAggregation: metrictypes.SpaceAggregationSum,
								ReduceTo:         qbtypes.ReduceToAvg,
							},
						},
						Filter: &qbtypes.Filter{
							Expression: "state = 'used'",
						},
						GroupBy:  []qbtypes.GroupByKey{hostNameGroupByKey},
						Disabled: true,
					},
				},
				// Query D: Memory usage (all states)
				{
					Type: qbtypes.QueryTypeBuilder,
					Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
						Name:   "D",
						Signal: telemetrytypes.SignalMetrics,
						Aggregations: []qbtypes.MetricAggregation{
							{
								MetricName:       "system.memory.usage",
								Temporality:      metrictypes.Cumulative,
								TimeAggregation:  metrictypes.TimeAggregationAvg,
								SpaceAggregation: metrictypes.SpaceAggregationSum,
								ReduceTo:         qbtypes.ReduceToAvg,
							},
						},
						GroupBy:  []qbtypes.GroupByKey{hostNameGroupByKey},
						Disabled: true,
					},
				},
				// Formula F2: Memory Usage (%)
				{
					Type: qbtypes.QueryTypeFormula,
					Spec: qbtypes.QueryBuilderFormula{
						Name:       "F2",
						Expression: "C/D",
						Legend:     "Memory Usage (%)",
						Disabled:   false,
					},
				},
				// Query E: CPU Wait time (state = wait)
				{
					Type: qbtypes.QueryTypeBuilder,
					Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
						Name:   "E",
						Signal: telemetrytypes.SignalMetrics,
						Aggregations: []qbtypes.MetricAggregation{
							{
								MetricName:       "system.cpu.time",
								Temporality:      metrictypes.Cumulative,
								TimeAggregation:  metrictypes.TimeAggregationRate,
								SpaceAggregation: metrictypes.SpaceAggregationSum,
								ReduceTo:         qbtypes.ReduceToAvg,
							},
						},
						Filter: &qbtypes.Filter{
							Expression: "state = 'wait'",
						},
						GroupBy:  []qbtypes.GroupByKey{hostNameGroupByKey},
						Disabled: true,
					},
				},
				// Query F: CPU time (all states)
				{
					Type: qbtypes.QueryTypeBuilder,
					Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
						Name:   "F",
						Signal: telemetrytypes.SignalMetrics,
						Aggregations: []qbtypes.MetricAggregation{
							{
								MetricName:       "system.cpu.time",
								Temporality:      metrictypes.Cumulative,
								TimeAggregation:  metrictypes.TimeAggregationRate,
								SpaceAggregation: metrictypes.SpaceAggregationSum,
								ReduceTo:         qbtypes.ReduceToAvg,
							},
						},
						GroupBy:  []qbtypes.GroupByKey{hostNameGroupByKey},
						Disabled: true,
					},
				},
				// Formula F3: CPU Wait Time (%)
				{
					Type: qbtypes.QueryTypeFormula,
					Spec: qbtypes.QueryBuilderFormula{
						Name:       "F3",
						Expression: "E/F",
						Legend:     "CPU Wait Time (%)",
						Disabled:   false,
					},
				},
				// Query G: Load15
				{
					Type: qbtypes.QueryTypeBuilder,
					Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
						Name:   "G",
						Signal: telemetrytypes.SignalMetrics,
						Legend: "CPU Load Average (15m)",
						Aggregations: []qbtypes.MetricAggregation{
							{
								MetricName:       "system.cpu.load_average.15m",
								Temporality:      metrictypes.Unspecified,
								TimeAggregation:  metrictypes.TimeAggregationAvg,
								SpaceAggregation: metrictypes.SpaceAggregationSum,
								ReduceTo:         qbtypes.ReduceToAvg,
							},
						},
						GroupBy:  []qbtypes.GroupByKey{hostNameGroupByKey},
						Disabled: false,
					},
				},
				// Query H: Filesystem Usage (state = used)
				{
					Type: qbtypes.QueryTypeBuilder,
					Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
						Name:   "H",
						Signal: telemetrytypes.SignalMetrics,
						Aggregations: []qbtypes.MetricAggregation{
							{
								MetricName:       "system.filesystem.usage",
								Temporality:      metrictypes.Cumulative,
								TimeAggregation:  metrictypes.TimeAggregationAvg,
								SpaceAggregation: metrictypes.SpaceAggregationSum,
								ReduceTo:         qbtypes.ReduceToAvg,
							},
						},
						Filter: &qbtypes.Filter{
							Expression: "state = 'used'",
						},
						GroupBy:  []qbtypes.GroupByKey{hostNameGroupByKey},
						Disabled: true,
					},
				},
				// Query I: Filesystem Usage (all states)
				{
					Type: qbtypes.QueryTypeBuilder,
					Spec: qbtypes.QueryBuilderQuery[qbtypes.MetricAggregation]{
						Name:   "I",
						Signal: telemetrytypes.SignalMetrics,
						Aggregations: []qbtypes.MetricAggregation{
							{
								MetricName:       "system.filesystem.usage",
								Temporality:      metrictypes.Cumulative,
								TimeAggregation:  metrictypes.TimeAggregationAvg,
								SpaceAggregation: metrictypes.SpaceAggregationSum,
								ReduceTo:         qbtypes.ReduceToAvg,
							},
						},
						GroupBy:  []qbtypes.GroupByKey{hostNameGroupByKey},
						Disabled: true,
					},
				},
				// Formula F4: Disk Usage (%)
				{
					Type: qbtypes.QueryTypeFormula,
					Spec: qbtypes.QueryBuilderFormula{
						Name:       "F4",
						Expression: "H/I",
						Legend:     "Disk Usage (%)",
						Disabled:   false,
					},
				},
			},
		},
	}
}

// getTopHostGroups runs a ranking query for the ordering metric, sorts the
// results, paginates, and backfills from metadataMap when the page extends
// past the metric-ranked groups.
func (m *module) getTopHostGroups(
	ctx context.Context,
	orgID valuer.UUID,
	req *inframonitoringtypes.HostsListRequest,
	metadataMap map[string]map[string]string,
) ([]map[string]string, error) {
	orderByKey := req.OrderBy.Key.Name
	queryNamesForOrderBy := orderByToHostsQueryNames[orderByKey]
	// The last entry is the formula/query whose value we sort by.
	rankingQueryName := queryNamesForOrderBy[len(queryNamesForOrderBy)-1]

	topReq := &qbtypes.QueryRangeRequest{
		Start:       uint64(req.Start),
		End:         uint64(req.End),
		RequestType: qbtypes.RequestTypeScalar,
		CompositeQuery: qbtypes.CompositeQuery{
			Queries: make([]qbtypes.QueryEnvelope, 0, len(queryNamesForOrderBy)),
		},
	}

	for _, envelope := range m.newHostsTableListQuery().CompositeQuery.Queries {
		if !slices.Contains(queryNamesForOrderBy, envelope.GetQueryName()) {
			continue
		}
		copied := envelope
		if copied.Type == qbtypes.QueryTypeBuilder {
			existingExpr := ""
			if f := copied.GetFilter(); f != nil {
				existingExpr = f.Expression
			}
			reqFilterExpr := ""
			if req.Filter != nil {
				reqFilterExpr = req.Filter.Expression
			}
			merged := mergeFilterExpressions(existingExpr, reqFilterExpr)
			copied.SetFilter(&qbtypes.Filter{Expression: merged})
			copied.SetGroupBy(req.GroupBy)
		}
		topReq.CompositeQuery.Queries = append(topReq.CompositeQuery.Queries, copied)
	}

	resp, err := m.querier.QueryRange(ctx, orgID, topReq)
	if err != nil {
		return nil, err
	}

	allMetricGroups := parseAndSortGroups(resp, rankingQueryName, req.GroupBy, req.OrderBy.Direction)
	return paginateWithBackfill(allMetricGroups, metadataMap, req.GroupBy, req.Offset, req.Limit), nil
}

// applyHostsActiveStatusFilter modifies req.Filter.Expression to include an IN/NOT IN
// clause based on FilterByStatus and the set of active hosts.
// Returns true if the caller should short-circuit with an empty result (e.g. status
// "active" was requested but no hosts are active).
func (m *module) applyHostsActiveStatusFilter(req *inframonitoringtypes.HostsListRequest, activeHostsMap map[string]bool) (shouldShortCircuit bool) {
	if req.Filter == nil || (req.Filter.FilterByStatus != inframonitoringtypes.HostStatusActive && req.Filter.FilterByStatus != inframonitoringtypes.HostStatusInactive) {
		return false
	}

	activeHosts := make([]string, 0, len(activeHostsMap))
	for host := range activeHostsMap {
		activeHosts = append(activeHosts, fmt.Sprintf("'%s'", host))
	}

	if len(activeHosts) == 0 {
		return req.Filter.FilterByStatus == inframonitoringtypes.HostStatusActive
	}

	op := "IN"
	if req.Filter.FilterByStatus == inframonitoringtypes.HostStatusInactive {
		op = "NOT IN"
	}
	statusClause := fmt.Sprintf("%s %s (%s)", hostNameAttrKey, op, strings.Join(activeHosts, ", "))
	req.Filter.Expression = mergeFilterExpressions(req.Filter.Expression, statusClause)
	return false
}
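
// Example (illustrative): with active hosts web-1 and web-2 and
// FilterByStatus = inactive, the clause merged into req.Filter.Expression is
//
//	host.name NOT IN ('web-1', 'web-2')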

func (m *module) getHostsTableMetadata(ctx context.Context, req *inframonitoringtypes.HostsListRequest) (map[string]map[string]string, error) {
	var nonGroupByAttrs []string
	for _, key := range hostAttrKeysForMetadata {
		if !isKeyInGroupByAttrs(req.GroupBy, key) {
			nonGroupByAttrs = append(nonGroupByAttrs, key)
		}
	}
	var filter *qbtypes.Filter
	if req.Filter != nil {
		filter = &req.Filter.Filter
	}
	metadataMap, err := m.getMetadata(ctx, hostsTableMetricNamesList, req.GroupBy, nonGroupByAttrs, filter, req.Start, req.End)
	if err != nil {
		return nil, err
	}
	return metadataMap, nil
}

// buildHostRecords constructs the final list of HostRecords for a page.
// Groups that had no metric data get default values of -1.
func (m *module) buildHostRecords(
	resp *qbtypes.QueryRangeResponse,
	pageGroups []map[string]string,
	groupBy []qbtypes.GroupByKey,
	metadataMap map[string]map[string]string,
	activeHostsMap map[string]bool,
) []inframonitoringtypes.HostRecord {
	metricsMap := parseFullQueryResponse(resp, groupBy)

	records := make([]inframonitoringtypes.HostRecord, 0, len(pageGroups))
	for _, labels := range pageGroups {
		compositeKey := compositeKeyFromLabels(labels, groupBy)
		hostName := labels[hostNameAttrKey]

		activeStatus := inframonitoringtypes.HostStatusNone
		if hostName != "" {
			if activeHostsMap[hostName] {
				activeStatus = inframonitoringtypes.HostStatusActive
			} else {
				activeStatus = inframonitoringtypes.HostStatusInactive
			}
		}

		record := inframonitoringtypes.HostRecord{
			HostName:  hostName,
			Status:    activeStatus,
			CPU:       -1,
			Memory:    -1,
			Wait:      -1,
			Load15:    -1,
			DiskUsage: -1,
			Meta:      map[string]interface{}{},
		}

		if metrics, ok := metricsMap[compositeKey]; ok {
			if v, exists := metrics["F1"]; exists {
				record.CPU = v
			}
			if v, exists := metrics["F2"]; exists {
				record.Memory = v
			}
			if v, exists := metrics["F3"]; exists {
				record.Wait = v
			}
			if v, exists := metrics["F4"]; exists {
				record.DiskUsage = v
			}
			if v, exists := metrics["G"]; exists {
				record.Load15 = v
			}
		}

		if attrs, ok := metadataMap[compositeKey]; ok {
			for k, v := range attrs {
				record.Meta[k] = v
			}
		}

		records = append(records, record)
	}
	return records
}

// getActiveHosts returns the set of host names that have reported metrics recently (since sinceUnixMilli).
// It queries distributed_metadata for hosts where last_reported_unix_milli >= sinceUnixMilli.
// TODO(nikhilmantri0902): this method is keyed on host.name only and ignores the request's group-by keys.
// When the API groups by a key other than host.name, its result is effectively unusable, since we should
// instead report active/inactive host counts per group. We need a way to determine active groups based on
// the group-by keys in the request.
func (m *module) getActiveHosts(ctx context.Context, metricNames []string, hostNameAttr string) (map[string]bool, error) {
	sinceUnixMilli := time.Now().Add(-10 * time.Minute).UTC().UnixMilli()

	sb := sqlbuilder.NewSelectBuilder()
	sb.Distinct()
	sb.Select("attr_string_value")
	sb.From(fmt.Sprintf("%s.%s", telemetrymetrics.DBName, telemetrymetrics.AttributesMetadataTableName))
	sb.Where(
		sb.In("metric_name", sqlbuilder.List(metricNames)),
		sb.E("attr_name", hostNameAttr),
		sb.NE("attr_string_value", ""),
		sb.GE("last_reported_unix_milli", sinceUnixMilli),
	)

	query, args := sb.BuildWithFlavor(sqlbuilder.ClickHouse)

	rows, err := m.telemetryStore.ClickhouseDB().Query(ctx, query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	activeHosts := make(map[string]bool)
	for rows.Next() {
		var hostName string
		if err := rows.Scan(&hostName); err != nil {
			return nil, err
		}
		if hostName != "" {
			activeHosts[hostName] = true
		}
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	return activeHosts, nil
}
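
// A rough sketch of the SQL this builds (hypothetical shape; the db and table
// names come from telemetrymetrics at runtime):
//
//	SELECT DISTINCT attr_string_value
//	FROM signoz_metrics.distributed_metadata
//	WHERE metric_name IN (...) AND attr_name = 'host.name'
//	  AND attr_string_value != '' AND last_reported_unix_milli >= ?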
@@ -1,145 +0,0 @@
package implinframonitoring

import (
	"context"
	"log/slog"

	"github.com/SigNoz/signoz/pkg/factory"
	"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
	"github.com/SigNoz/signoz/pkg/querier"
	"github.com/SigNoz/signoz/pkg/telemetrymetrics"
	"github.com/SigNoz/signoz/pkg/telemetrystore"
	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type module struct {
	telemetryStore         telemetrystore.TelemetryStore
	telemetryMetadataStore telemetrytypes.MetadataStore
	querier                querier.Querier
	fieldMapper            qbtypes.FieldMapper
	condBuilder            qbtypes.ConditionBuilder
	logger                 *slog.Logger
	config                 inframonitoring.Config
}

// NewModule constructs the inframonitoring module with the provided dependencies.
func NewModule(
	telemetryStore telemetrystore.TelemetryStore,
	telemetryMetadataStore telemetrytypes.MetadataStore,
	querier querier.Querier,
	providerSettings factory.ProviderSettings,
	cfg inframonitoring.Config,
) inframonitoring.Module {
	fieldMapper := telemetrymetrics.NewFieldMapper()
	condBuilder := telemetrymetrics.NewConditionBuilder(fieldMapper)
	return &module{
		telemetryStore:         telemetryStore,
		telemetryMetadataStore: telemetryMetadataStore,
		querier:                querier,
		fieldMapper:            fieldMapper,
		condBuilder:            condBuilder,
		logger:                 providerSettings.Logger,
		config:                 cfg,
	}
}

func (m *module) HostsList(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.HostsListRequest) (*inframonitoringtypes.HostsListResponse, error) {
	if err := req.Validate(); err != nil {
		return nil, err
	}

	resp := &inframonitoringtypes.HostsListResponse{}

	// Default to ordering by CPU usage.
	if req.OrderBy == nil {
		req.OrderBy = &qbtypes.OrderBy{
			Key: qbtypes.OrderByKey{
				TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
					Name: "cpu",
				},
			},
			Direction: qbtypes.OrderDirectionDesc,
		}
	}

	// Default to grouping by host name.
	if len(req.GroupBy) == 0 {
		req.GroupBy = []qbtypes.GroupByKey{hostNameGroupByKey}
		resp.Type = inframonitoringtypes.ResponseTypeList
	} else {
		resp.Type = inframonitoringtypes.ResponseTypeGroupedList
	}

	// 1. Check which required metrics exist and get the earliest retention time.
	//    If any required metric is missing, return early with the list of missing metrics.
	// 2. If metrics exist but req.End is before the earliest reported time, convey the retention boundary.
	missingMetrics, minFirstReportedUnixMilli, err := m.getMetricsExistenceAndEarliestTime(ctx, hostsTableMetricNamesList)
	if err != nil {
		return nil, err
	}
	if len(missingMetrics) > 0 {
		resp.RequiredMetricsCheck = inframonitoringtypes.RequiredMetricsCheck{MissingMetrics: missingMetrics}
		resp.Records = []inframonitoringtypes.HostRecord{}
		resp.Total = 0
		return resp, nil
	}
	if req.End < int64(minFirstReportedUnixMilli) {
		resp.EndTimeBeforeRetention = true
		resp.Records = []inframonitoringtypes.HostRecord{}
		resp.Total = 0
		return resp, nil
	}

	// TODO: replace this separate ClickHouse query with a sub-query inside the main query builder query
	// once QB supports sub-queries. Tracked in PR #10805 review.
	// Determine active hosts: those with metrics reported in the last 10 minutes.
	activeHostsMap, err := m.getActiveHosts(ctx, hostsTableMetricNamesList, hostNameAttrKey)
	if err != nil {
		return nil, err
	}

	// This call mutates req.Filter, ANDing in an active-hosts clause when FilterByStatus is set.
	if m.applyHostsActiveStatusFilter(req, activeHostsMap) {
		resp.Records = []inframonitoringtypes.HostRecord{}
		resp.Total = 0
		return resp, nil
	}

	metadataMap, err := m.getHostsTableMetadata(ctx, req)
	if err != nil {
		return nil, err
	}
	if metadataMap == nil {
		metadataMap = make(map[string]map[string]string)
	}

	resp.Total = len(metadataMap)

	pageGroups, err := m.getTopHostGroups(ctx, orgID, req, metadataMap)
	if err != nil {
		return nil, err
	}

	if len(pageGroups) == 0 {
		resp.Records = []inframonitoringtypes.HostRecord{}
		return resp, nil
	}

	hostsFilterExpr := ""
	if req.Filter != nil {
		hostsFilterExpr = req.Filter.Expression
	}
	fullQueryReq := buildFullQueryRequest(req.Start, req.End, hostsFilterExpr, req.GroupBy, pageGroups, m.newHostsTableListQuery())
	queryResp, err := m.querier.QueryRange(ctx, orgID, fullQueryReq)
	if err != nil {
		return nil, err
	}

	resp.Records = m.buildHostRecords(queryResp, pageGroups, req.GroupBy, metadataMap, activeHostsMap)
	resp.Warning = queryResp.Warning

	return resp, nil
}
@@ -1,17 +0,0 @@
package inframonitoring

import (
	"context"
	"net/http"

	"github.com/SigNoz/signoz/pkg/types/inframonitoringtypes"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type Handler interface {
	HostsList(http.ResponseWriter, *http.Request)
}

type Module interface {
	HostsList(ctx context.Context, orgID valuer.UUID, req *inframonitoringtypes.HostsListRequest) (*inframonitoringtypes.HostsListResponse, error)
}
@@ -587,7 +587,6 @@ func (aH *APIHandler) RegisterRoutes(router *mux.Router, am *middleware.AuthZ) {
	router.HandleFunc("/api/v1/query_filter/analyze", am.ViewAccess(aH.QueryParserAPI.AnalyzeQueryFilter)).Methods(http.MethodPost)
}

func Intersection(a, b []int) (c []int) {
	m := make(map[int]bool)

@@ -244,6 +244,20 @@ func (s *Server) createPublicServer(api *APIHandler, web web.Web) (*http.Server,
		return nil, err
	}

	routePrefix := s.config.Global.ExternalPath()
	if routePrefix != "" {
		prefixed := http.StripPrefix(routePrefix, handler)
		handler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			switch req.URL.Path {
			case "/api/v1/health", "/api/v2/healthz", "/api/v2/readyz", "/api/v2/livez":
				r.ServeHTTP(w, req)
				return
			}

			prefixed.ServeHTTP(w, req)
		})
	}
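	// For example (illustrative, matching the global config's documented behavior):
	// with external_url https://example.com/signoz, ExternalPath() yields "/signoz",
	// so /signoz/api/v1/version is served as /api/v1/version, while the health
	// endpoints listed above stay reachable without the prefix.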

	return &http.Server{
		Handler: handler,
	}, nil

@@ -23,7 +23,6 @@ import (
	"github.com/SigNoz/signoz/pkg/identn"
	"github.com/SigNoz/signoz/pkg/instrumentation"
	"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
	"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
	"github.com/SigNoz/signoz/pkg/modules/serviceaccount"
	"github.com/SigNoz/signoz/pkg/modules/user"
@@ -115,9 +114,6 @@ type Config struct {
	// MetricsExplorer config
	MetricsExplorer metricsexplorer.Config `mapstructure:"metricsexplorer"`

	// InfraMonitoring config
	InfraMonitoring inframonitoring.Config `mapstructure:"inframonitoring"`

	// Flagger config
	Flagger flagger.Config `mapstructure:"flagger"`

@@ -161,7 +157,6 @@ func NewConfig(ctx context.Context, logger *slog.Logger, resolverConfig config.R
	gateway.NewConfigFactory(),
	tokenizer.NewConfigFactory(),
	metricsexplorer.NewConfigFactory(),
	inframonitoring.NewConfigFactory(),
	flagger.NewConfigFactory(),
	user.NewConfigFactory(),
	identn.NewConfigFactory(),

@@ -20,8 +18,6 @@ import (
	"github.com/SigNoz/signoz/pkg/modules/dashboard/impldashboard"
	"github.com/SigNoz/signoz/pkg/modules/fields"
	"github.com/SigNoz/signoz/pkg/modules/fields/implfields"
	"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
	"github.com/SigNoz/signoz/pkg/modules/inframonitoring/implinframonitoring"
	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer/implmetricsexplorer"
	"github.com/SigNoz/signoz/pkg/modules/quickfilter"
@@ -55,7 +53,6 @@ type Handlers struct {
	SpanPercentile  spanpercentile.Handler
	Services        services.Handler
	MetricsExplorer metricsexplorer.Handler
	InfraMonitoring inframonitoring.Handler
	Global          global.Handler
	FlaggerHandler  flagger.Handler
	GatewayHandler  gateway.Handler
@@ -94,7 +91,6 @@ func NewHandlers(
	RawDataExport:   implrawdataexport.NewHandler(modules.RawDataExport),
	Services:        implservices.NewHandler(modules.Services),
	MetricsExplorer: implmetricsexplorer.NewHandler(modules.MetricsExplorer),
	InfraMonitoring: implinframonitoring.NewHandler(modules.InfraMonitoring),
	SpanPercentile:  implspanpercentile.NewHandler(modules.SpanPercentile),
	Global:          signozglobal.NewHandler(global),
	FlaggerHandler:  flagger.NewHandler(flaggerService),

@@ -14,8 +14,6 @@ import (
	"github.com/SigNoz/signoz/pkg/modules/authdomain/implauthdomain"
	"github.com/SigNoz/signoz/pkg/modules/cloudintegration"
	"github.com/SigNoz/signoz/pkg/modules/dashboard"
	"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
	"github.com/SigNoz/signoz/pkg/modules/inframonitoring/implinframonitoring"
	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer/implmetricsexplorer"
	"github.com/SigNoz/signoz/pkg/modules/organization"
@@ -71,7 +69,6 @@ type Modules struct {
	Services         services.Module
	SpanPercentile   spanpercentile.Module
	MetricsExplorer  metricsexplorer.Module
	InfraMonitoring  inframonitoring.Module
	Promote          promote.Module
	ServiceAccount   serviceaccount.Module
	CloudIntegration cloudintegration.Module
@@ -122,7 +119,6 @@ func NewModules(
	SpanPercentile:   implspanpercentile.NewModule(querier, providerSettings),
	Services:         implservices.NewModule(querier, telemetryStore),
	MetricsExplorer:  implmetricsexplorer.NewModule(telemetryStore, telemetryMetadataStore, cache, ruleStore, dashboard, providerSettings, config.MetricsExplorer),
	InfraMonitoring:  implinframonitoring.NewModule(telemetryStore, telemetryMetadataStore, querier, providerSettings, config.InfraMonitoring),
	Promote:          implpromote.NewModule(telemetryMetadataStore, telemetryStore),
	ServiceAccount:   serviceAccount,
	RuleStateHistory: implrulestatehistory.NewModule(implrulestatehistory.NewStore(telemetryStore, telemetryMetadataStore, providerSettings.Logger)),

@@ -22,7 +22,6 @@ import (
	"github.com/SigNoz/signoz/pkg/modules/dashboard"
	"github.com/SigNoz/signoz/pkg/modules/fields"
	"github.com/SigNoz/signoz/pkg/modules/metricsexplorer"
	"github.com/SigNoz/signoz/pkg/modules/inframonitoring"
	"github.com/SigNoz/signoz/pkg/modules/organization"
	"github.com/SigNoz/signoz/pkg/modules/preference"
	"github.com/SigNoz/signoz/pkg/modules/promote"
@@ -61,7 +60,6 @@ func NewOpenAPI(ctx context.Context, instrumentation instrumentation.Instrumenta
	struct{ dashboard.Module }{},
	struct{ dashboard.Handler }{},
	struct{ metricsexplorer.Handler }{},
	struct{ inframonitoring.Handler }{},
	struct{ gateway.Handler }{},
	struct{ fields.Handler }{},
	struct{ authz.Handler }{},

@@ -88,9 +88,9 @@ func NewCacheProviderFactories() factory.NamedMap[factory.ProviderFactory[cache.
	)
}

func NewWebProviderFactories() factory.NamedMap[factory.ProviderFactory[web.Web, web.Config]] {
func NewWebProviderFactories(globalConfig global.Config) factory.NamedMap[factory.ProviderFactory[web.Web, web.Config]] {
	return factory.MustNewNamedMap(
		routerweb.NewFactory(),
		routerweb.NewFactory(globalConfig),
		noopweb.NewFactory(),
	)
}
@@ -278,7 +277,6 @@ func NewAPIServerProviderFactories(orgGetter organization.Getter, authz authz.Au
	modules.Dashboard,
	handlers.Dashboard,
	handlers.MetricsExplorer,
	handlers.InfraMonitoring,
	handlers.GatewayHandler,
	handlers.Fields,
	handlers.AuthzHandler,

@@ -8,6 +8,7 @@ import (
	"github.com/SigNoz/signoz/pkg/alertmanager/nfmanager/nfmanagertest"
	"github.com/SigNoz/signoz/pkg/analytics"
	"github.com/SigNoz/signoz/pkg/flagger"
	"github.com/SigNoz/signoz/pkg/global"
	"github.com/SigNoz/signoz/pkg/instrumentation/instrumentationtest"
	"github.com/SigNoz/signoz/pkg/modules/organization/implorganization"
	"github.com/SigNoz/signoz/pkg/modules/user/impluser"
@@ -34,7 +35,7 @@ func TestNewProviderFactories(t *testing.T) {
	})

	assert.NotPanics(t, func() {
		NewWebProviderFactories()
		NewWebProviderFactories(global.Config{})
	})

	assert.NotPanics(t, func() {

@@ -1,21 +0,0 @@
package inframonitoringtypes

import (
	"github.com/SigNoz/signoz/pkg/valuer"
)

type ResponseType struct {
	valuer.String
}

var (
	ResponseTypeList        = ResponseType{valuer.NewString("list")}
	ResponseTypeGroupedList = ResponseType{valuer.NewString("grouped_list")}
)

func (ResponseType) Enum() []any {
	return []any{
		ResponseTypeList,
		ResponseTypeGroupedList,
	}
}
@@ -1,150 +0,0 @@
package inframonitoringtypes

import (
	"encoding/json"
	"slices"

	"github.com/SigNoz/signoz/pkg/errors"
	qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
	"github.com/SigNoz/signoz/pkg/valuer"
)

type HostStatus struct {
	valuer.String
}

var (
	HostStatusActive   = HostStatus{valuer.NewString("active")}
	HostStatusInactive = HostStatus{valuer.NewString("inactive")}
	HostStatusNone     = HostStatus{valuer.NewString("")}
)

func (HostStatus) Enum() []any {
	return []any{
		HostStatusActive,
		HostStatusInactive,
		HostStatusNone,
	}
}

const (
	HostsOrderByCPU       = "cpu"
	HostsOrderByMemory    = "memory"
	HostsOrderByWait      = "wait"
	HostsOrderByDiskUsage = "disk_usage"
	HostsOrderByLoad15    = "load15"
)

var HostsValidOrderByKeys = []string{
	HostsOrderByCPU,
	HostsOrderByMemory,
	HostsOrderByWait,
	HostsOrderByDiskUsage,
	HostsOrderByLoad15,
}

type HostFilter struct {
	qbtypes.Filter `json:",inline"`
	FilterByStatus HostStatus `json:"filterByStatus"`
}

type HostsListRequest struct {
	Start   int64                `json:"start"`
	End     int64                `json:"end"`
	Filter  *HostFilter          `json:"filter"`
	GroupBy []qbtypes.GroupByKey `json:"groupBy"`
	OrderBy *qbtypes.OrderBy     `json:"orderBy"`
	Offset  int                  `json:"offset"`
	Limit   int                  `json:"limit"`
}

// Validate ensures HostsListRequest contains acceptable values.
func (req *HostsListRequest) Validate() error {
	if req == nil {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "request is nil")
	}

	if req.Start <= 0 {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid start time %d: start must be greater than 0",
			req.Start,
		)
	}

	if req.End <= 0 {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid end time %d: end must be greater than 0",
			req.End,
		)
	}

	if req.Start >= req.End {
		return errors.NewInvalidInputf(
			errors.CodeInvalidInput,
			"invalid time range: start (%d) must be less than end (%d)",
			req.Start,
			req.End,
		)
	}

	if req.Limit < 1 || req.Limit > 5000 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "limit must be between 1 and 5000")
	}

	if req.Offset < 0 {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "offset cannot be negative")
	}

	if req.Filter != nil && !req.Filter.FilterByStatus.IsZero() &&
		req.Filter.FilterByStatus != HostStatusActive && req.Filter.FilterByStatus != HostStatusInactive {
		return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid filter by status: %s", req.Filter.FilterByStatus)
	}

	if req.OrderBy != nil {
		if !slices.Contains(HostsValidOrderByKeys, req.OrderBy.Key.Name) {
			return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by key: %s", req.OrderBy.Key.Name)
		}
		if req.OrderBy.Direction != qbtypes.OrderDirectionAsc && req.OrderBy.Direction != qbtypes.OrderDirectionDesc {
			return errors.NewInvalidInputf(errors.CodeInvalidInput, "invalid order by direction: %s", req.OrderBy.Direction)
		}
	}

	return nil
}
|
||||
|
||||
// UnmarshalJSON validates input immediately after decoding.
|
||||
func (req *HostsListRequest) UnmarshalJSON(data []byte) error {
|
||||
type raw HostsListRequest
|
||||
var decoded raw
|
||||
if err := json.Unmarshal(data, &decoded); err != nil {
|
||||
return err
|
||||
}
|
||||
*req = HostsListRequest(decoded)
|
||||
return req.Validate()
|
||||
}
|
||||
|
||||
type RequiredMetricsCheck struct {
|
||||
MissingMetrics []string `json:"missingMetrics"`
|
||||
}
|
||||
|
||||
type HostsListResponse struct {
|
||||
Type ResponseType `json:"type"`
|
||||
Records []HostRecord `json:"records"`
|
||||
Total int `json:"total"`
|
||||
RequiredMetricsCheck RequiredMetricsCheck `json:"requiredMetricsCheck"`
|
||||
EndTimeBeforeRetention bool `json:"endTimeBeforeRetention"`
|
||||
Warning *qbtypes.QueryWarnData `json:"warning,omitempty"`
|
||||
}
|
||||
|
||||
type HostRecord struct {
|
||||
HostName string `json:"hostName"`
|
||||
Status HostStatus `json:"status"`
|
||||
CPU float64 `json:"cpu"`
|
||||
Memory float64 `json:"memory"`
|
||||
Wait float64 `json:"wait"`
|
||||
Load15 float64 `json:"load15"`
|
||||
DiskUsage float64 `json:"diskUsage"`
|
||||
Meta map[string]interface{} `json:"meta"`
|
||||
}
|
||||
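A note on the UnmarshalJSON method in the removed file above: the `type raw HostsListRequest` line is the standard trick for validating during decode without infinite recursion. The locally defined type `raw` has the same fields but none of the methods, so json.Unmarshal on it uses the default decoder instead of re-entering UnmarshalJSON. A minimal standalone sketch of the same pattern (the Payload type is hypothetical, not part of this diff):

package main

import (
	"encoding/json"
	"fmt"
)

type Payload struct {
	Limit int `json:"limit"`
}

// UnmarshalJSON decodes into an alias type (which carries no methods, so
// there is no recursion), then validates before accepting the value.
func (p *Payload) UnmarshalJSON(data []byte) error {
	type raw Payload
	var decoded raw
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	if decoded.Limit < 1 {
		return fmt.Errorf("limit must be at least 1, got %d", decoded.Limit)
	}
	*p = Payload(decoded)
	return nil
}

func main() {
	var p Payload
	fmt.Println(json.Unmarshal([]byte(`{"limit":0}`), &p)) // limit must be at least 1, got 0
}
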
@@ -1,244 +0,0 @@
package inframonitoringtypes

import (
"testing"

"github.com/SigNoz/signoz/pkg/errors"
qbtypes "github.com/SigNoz/signoz/pkg/types/querybuildertypes/querybuildertypesv5"
"github.com/SigNoz/signoz/pkg/types/telemetrytypes"
"github.com/SigNoz/signoz/pkg/valuer"
"github.com/stretchr/testify/require"
)

func TestHostsListRequest_Validate(t *testing.T) {
tests := []struct {
name string
req *HostsListRequest
wantErr bool
}{
{
name: "valid request",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "nil request",
req: nil,
wantErr: true,
},
{
name: "start time zero",
req: &HostsListRequest{
Start: 0,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time negative",
req: &HostsListRequest{
Start: -1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "end time zero",
req: &HostsListRequest{
Start: 1000,
End: 0,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time greater than end time",
req: &HostsListRequest{
Start: 2000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "start time equal to end time",
req: &HostsListRequest{
Start: 1000,
End: 1000,
Limit: 100,
Offset: 0,
},
wantErr: true,
},
{
name: "limit zero",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 0,
Offset: 0,
},
wantErr: true,
},
{
name: "limit negative",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: -10,
Offset: 0,
},
wantErr: true,
},
{
name: "limit exceeds max",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 5001,
Offset: 0,
},
wantErr: true,
},
{
name: "offset negative",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: -5,
},
wantErr: true,
},
{
name: "filter by status ACTIVE",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
Filter: &HostFilter{FilterByStatus: HostStatusActive},
},
wantErr: false,
},
{
name: "filter by status INACTIVE",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
Filter: &HostFilter{FilterByStatus: HostStatusInactive},
},
wantErr: false,
},
{
name: "filter by status empty (zero value)",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "filter by status invalid value",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
Filter: &HostFilter{FilterByStatus: HostStatus{valuer.NewString("UNKNOWN")}},
},
wantErr: true,
},
{
name: "orderBy nil is valid",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
},
wantErr: false,
},
{
name: "orderBy with valid key cpu and direction asc",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: HostsOrderByCPU,
},
},
Direction: qbtypes.OrderDirectionAsc,
},
},
wantErr: false,
},
{
name: "orderBy with invalid key",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: "unknown",
},
},
Direction: qbtypes.OrderDirectionDesc,
},
},
wantErr: true,
},
{
name: "orderBy with valid key but invalid direction",
req: &HostsListRequest{
Start: 1000,
End: 2000,
Limit: 100,
Offset: 0,
OrderBy: &qbtypes.OrderBy{
Key: qbtypes.OrderByKey{
TelemetryFieldKey: telemetrytypes.TelemetryFieldKey{
Name: HostsOrderByMemory,
},
},
Direction: qbtypes.OrderDirection{String: valuer.NewString("invalid")},
},
},
wantErr: true,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
err := tt.req.Validate()
if tt.wantErr {
require.Error(t, err)
require.True(t, errors.Ast(err, errors.TypeInvalidInput), "expected error to be of type InvalidInput")
} else {
require.NoError(t, err)
}
})
}
}

@@ -8,10 +8,11 @@ import (
type Config struct {
// Whether the web package is enabled.
Enabled bool `mapstructure:"enabled"`
// The prefix to serve the files from
Prefix string `mapstructure:"prefix"`
// The directory containing the static build files. The root of this directory should
// have an index.html file.

// The name of the index file to serve.
Index string `mapstructure:"index"`

// The directory from which to serve the web files.
Directory string `mapstructure:"directory"`
}

@@ -22,7 +23,7 @@ func NewConfigFactory() factory.ConfigFactory {
func newConfig() factory.Config {
return &Config{
Enabled: true,
Prefix: "/",
Index: "index.html",
Directory: "/etc/signoz/web",
}
}

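With this change the `prefix` option disappears: the base path of the web frontend is now derived from the path component of the global external URL, via the `globalConfig.ExternalPathTrailing()` call in the routerweb diff below. That helper's implementation is not part of this diff; the following is only a sketch of the behavior the tests below imply, namely the external URL's path with a trailing slash, falling back to "/" when no external URL is configured:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// externalPathTrailing mirrors what global.Config.ExternalPathTrailing()
// appears to do, judging by the routerweb tests: "/" when no external URL
// is set, otherwise the URL's path normalized to end with a slash.
// (Assumed behavior; the real implementation is not shown in this diff.)
func externalPathTrailing(externalURL *url.URL) string {
	if externalURL == nil || externalURL.Path == "" {
		return "/"
	}
	return strings.TrimSuffix(externalURL.Path, "/") + "/"
}

func main() {
	fmt.Println(externalPathTrailing(nil))                       // "/"
	fmt.Println(externalPathTrailing(&url.URL{Path: "/signoz"})) // "/signoz/"
}
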
@@ -12,7 +12,6 @@ import (
)

func TestNewWithEnvProvider(t *testing.T) {
t.Setenv("SIGNOZ_WEB_PREFIX", "/web")
t.Setenv("SIGNOZ_WEB_ENABLED", "false")

conf, err := config.New(
@@ -37,7 +36,7 @@ func TestNewWithEnvProvider(t *testing.T) {

expected := &Config{
Enabled: false,
Prefix: "/web",
Index: def.Index,
Directory: def.Directory,
}

@@ -8,56 +8,55 @@ import (

"github.com/SigNoz/signoz/pkg/errors"
"github.com/SigNoz/signoz/pkg/factory"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/http/middleware"
"github.com/SigNoz/signoz/pkg/web"
"github.com/gorilla/mux"
)

const (
indexFileName string = "index.html"
)

type provider struct {
config web.Config
config        web.Config
indexContents []byte
fileHandler   http.Handler
}

func NewFactory() factory.ProviderFactory[web.Web, web.Config] {
return factory.NewProviderFactory(factory.MustNewName("router"), New)
func NewFactory(globalConfig global.Config) factory.ProviderFactory[web.Web, web.Config] {
return factory.NewProviderFactory(factory.MustNewName("router"), func(ctx context.Context, settings factory.ProviderSettings, config web.Config) (web.Web, error) {
return New(ctx, settings, config, globalConfig)
})
}

func New(ctx context.Context, settings factory.ProviderSettings, config web.Config) (web.Web, error) {
func New(ctx context.Context, settings factory.ProviderSettings, config web.Config, globalConfig global.Config) (web.Web, error) {
fi, err := os.Stat(config.Directory)
if err != nil {
return nil, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "cannot access web directory")
}

ok := fi.IsDir()
if !ok {
if !fi.IsDir() {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "web directory is not a directory")
}

fi, err = os.Stat(filepath.Join(config.Directory, indexFileName))
indexPath := filepath.Join(config.Directory, config.Index)
raw, err := os.ReadFile(indexPath)
if err != nil {
return nil, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "cannot access %q in web directory", indexFileName)
return nil, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "cannot read %q in web directory", config.Index)
}

if os.IsNotExist(err) || fi.IsDir() {
return nil, errors.NewInvalidInputf(errors.CodeInvalidInput, "%q does not exist", indexFileName)
}
logger := factory.NewScopedProviderSettings(settings, "github.com/SigNoz/signoz/pkg/web/routerweb").Logger()
indexContents := web.NewIndex(ctx, logger, config.Index, raw, web.TemplateData{BaseHref: globalConfig.ExternalPathTrailing()})

return &provider{
config: config,
config:        config,
indexContents: indexContents,
fileHandler:   http.FileServer(http.Dir(config.Directory)),
}, nil
}

func (provider *provider) AddToRouter(router *mux.Router) error {
cache := middleware.NewCache(0)
err := router.PathPrefix(provider.config.Prefix).
err := router.PathPrefix("/").
Handler(
http.StripPrefix(
provider.config.Prefix,
cache.Wrap(http.HandlerFunc(provider.ServeHTTP)),
),
cache.Wrap(http.HandlerFunc(provider.ServeHTTP)),
).GetError()
if err != nil {
return errors.WrapInternalf(err, errors.CodeInternal, "unable to add web to router")
@@ -75,7 +74,7 @@ func (provider *provider) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if err != nil {
// if the file doesn't exist, serve index.html
if os.IsNotExist(err) {
http.ServeFile(rw, req, filepath.Join(provider.config.Directory, indexFileName))
provider.serveIndex(rw)
return
}

@@ -87,10 +86,15 @@ func (provider *provider) ServeHTTP(rw http.ResponseWriter, req *http.Request) {

if fi.IsDir() {
// path is a directory, serve index.html
http.ServeFile(rw, req, filepath.Join(provider.config.Directory, indexFileName))
provider.serveIndex(rw)
return
}

// otherwise, use http.FileServer to serve the static file
http.FileServer(http.Dir(provider.config.Directory)).ServeHTTP(rw, req)
provider.fileHandler.ServeHTTP(rw, req)
}

func (provider *provider) serveIndex(rw http.ResponseWriter) {
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
_, _ = rw.Write(provider.indexContents)
}

@@ -5,45 +5,113 @@ import (
"io"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"testing"

"github.com/SigNoz/signoz/pkg/factory/factorytest"
"github.com/SigNoz/signoz/pkg/global"
"github.com/SigNoz/signoz/pkg/web"
"github.com/gorilla/mux"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestServeHttpWithoutPrefix(t *testing.T) {
t.Parallel()
fi, err := os.Open(filepath.Join("testdata", indexFileName))
require.NoError(t, err)
func startServer(t *testing.T, config web.Config, globalConfig global.Config) string {
t.Helper()

expected, err := io.ReadAll(fi)
require.NoError(t, err)

web, err := New(context.Background(), factorytest.NewSettings(), web.Config{Prefix: "/", Directory: filepath.Join("testdata")})
web, err := New(context.Background(), factorytest.NewSettings(), config, globalConfig)
require.NoError(t, err)

router := mux.NewRouter()
err = web.AddToRouter(router)
require.NoError(t, err)
require.NoError(t, web.AddToRouter(router))

listener, err := net.Listen("tcp", "localhost:0")
require.NoError(t, err)

server := &http.Server{
Handler: router,
server := &http.Server{Handler: router}
go func() { _ = server.Serve(listener) }()
t.Cleanup(func() { _ = server.Close() })

return "http://" + listener.Addr().String()
}

func httpGet(t *testing.T, url string) string {
t.Helper()

res, err := http.DefaultClient.Get(url)
require.NoError(t, err)
defer func() { _ = res.Body.Close() }()

body, err := io.ReadAll(res.Body)
require.NoError(t, err)

return string(body)
}

func TestServeTemplatedIndex(t *testing.T) {
t.Parallel()

testCases := []struct {
name string
path string
globalConfig global.Config
expected string
}{
{
name: "RootBaseHrefAtRoot",
path: "/",
globalConfig: global.Config{},
expected: `<html><head><base href="/" /></head><body>Welcome to test data!!!</body></html>`,
},
{
name: "RootBaseHrefAtNonExistentPath",
path: "/does-not-exist",
globalConfig: global.Config{},
expected: `<html><head><base href="/" /></head><body>Welcome to test data!!!</body></html>`,
},
{
name: "RootBaseHrefAtDirectory",
path: "/assets",
globalConfig: global.Config{},
expected: `<html><head><base href="/" /></head><body>Welcome to test data!!!</body></html>`,
},
{
name: "SubPathBaseHrefAtRoot",
path: "/",
globalConfig: global.Config{ExternalURL: &url.URL{Scheme: "https", Host: "example.com", Path: "/signoz"}},
expected: `<html><head><base href="/signoz/" /></head><body>Welcome to test data!!!</body></html>`,
},
{
name: "SubPathBaseHrefAtNonExistentPath",
path: "/does-not-exist",
globalConfig: global.Config{ExternalURL: &url.URL{Scheme: "https", Host: "example.com", Path: "/signoz"}},
expected: `<html><head><base href="/signoz/" /></head><body>Welcome to test data!!!</body></html>`,
},
{
name: "SubPathBaseHrefAtDirectory",
path: "/assets",
globalConfig: global.Config{ExternalURL: &url.URL{Scheme: "https", Host: "example.com", Path: "/signoz"}},
expected: `<html><head><base href="/signoz/" /></head><body>Welcome to test data!!!</body></html>`,
},
}

go func() {
_ = server.Serve(listener)
}()
defer func() {
_ = server.Close()
}()
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
base := startServer(t, web.Config{Index: "valid_template.html", Directory: "testdata"}, testCase.globalConfig)

assert.Equal(t, testCase.expected, strings.TrimSuffix(httpGet(t, base+testCase.path), "\n"))
})
}
}

func TestServeNoTemplateIndex(t *testing.T) {
t.Parallel()

expected, err := os.ReadFile(filepath.Join("testdata", "no_template.html"))
require.NoError(t, err)

testCases := []struct {
name string
@@ -54,11 +122,7 @@ func TestServeHttpWithoutPrefix(t *testing.T) {
path: "/",
},
{
name: "Index",
path: "/" + indexFileName,
},
{
name: "DoesNotExist",
name: "NonExistentPath",
path: "/does-not-exist",
},
{
@@ -67,104 +131,55 @@ func TestServeHttpWithoutPrefix(t *testing.T) {
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
res, err := http.DefaultClient.Get("http://" + listener.Addr().String() + tc.path)
require.NoError(t, err)
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
base := startServer(t, web.Config{Index: "no_template.html", Directory: "testdata"}, global.Config{})

defer func() {
_ = res.Body.Close()
}()

actual, err := io.ReadAll(res.Body)
require.NoError(t, err)

assert.Equal(t, expected, actual)
assert.Equal(t, string(expected), httpGet(t, base+testCase.path))
})
}

}

func TestServeHttpWithPrefix(t *testing.T) {
func TestServeInvalidTemplateIndex(t *testing.T) {
t.Parallel()
fi, err := os.Open(filepath.Join("testdata", indexFileName))

expected, err := os.ReadFile(filepath.Join("testdata", "invalid_template.html"))
require.NoError(t, err)

expected, err := io.ReadAll(fi)
require.NoError(t, err)

web, err := New(context.Background(), factorytest.NewSettings(), web.Config{Prefix: "/web", Directory: filepath.Join("testdata")})
require.NoError(t, err)

router := mux.NewRouter()
err = web.AddToRouter(router)
require.NoError(t, err)

listener, err := net.Listen("tcp", "localhost:0")
require.NoError(t, err)

server := &http.Server{
Handler: router,
}

go func() {
_ = server.Serve(listener)
}()
defer func() {
_ = server.Close()
}()

testCases := []struct {
name string
path string
found bool
name string
path string
}{
{
name: "Root",
path: "/web",
found: true,
name: "Root",
path: "/",
},
{
name: "Index",
path: "/web/" + indexFileName,
found: true,
name: "NonExistentPath",
path: "/does-not-exist",
},
{
name: "FileDoesNotExist",
path: "/web/does-not-exist",
found: true,
},
{
name: "Directory",
path: "/web/assets",
found: true,
},
{
name: "DoesNotExist",
path: "/does-not-exist",
found: false,
name: "Directory",
path: "/assets",
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
res, err := http.DefaultClient.Get("http://" + listener.Addr().String() + tc.path)
require.NoError(t, err)

defer func() {
_ = res.Body.Close()
}()

if tc.found {
actual, err := io.ReadAll(res.Body)
require.NoError(t, err)

assert.Equal(t, expected, actual)
} else {
assert.Equal(t, http.StatusNotFound, res.StatusCode)
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
base := startServer(t, web.Config{Index: "invalid_template.html", Directory: "testdata"}, global.Config{ExternalURL: &url.URL{Path: "/signoz"}})

assert.Equal(t, string(expected), httpGet(t, base+testCase.path))
})
}

}

func TestServeStaticFilesUnchanged(t *testing.T) {
t.Parallel()

expected, err := os.ReadFile(filepath.Join("testdata", "assets", "style.css"))
require.NoError(t, err)

base := startServer(t, web.Config{Index: "valid_template.html", Directory: "testdata"}, global.Config{ExternalURL: &url.URL{Path: "/signoz"}})

assert.Equal(t, string(expected), httpGet(t, base+"/assets/style.css"))
}

3 pkg/web/routerweb/testdata/assets/index.css vendored
@@ -1,3 +0,0 @@
#root {
background-color: red;
}

1 pkg/web/routerweb/testdata/assets/style.css vendored Normal file
@@ -0,0 +1 @@
body { color: red; }

1 pkg/web/routerweb/testdata/index.html vendored
@@ -1 +0,0 @@
<h1>Welcome to test data!!!</h1>

1 pkg/web/routerweb/testdata/invalid_template.html vendored Normal file
@@ -0,0 +1 @@
<html><head><base href="[[." /></head><body>Bad template</body></html>

1 pkg/web/routerweb/testdata/no_template.html vendored Normal file
@@ -0,0 +1 @@
<html><head></head><body>No template here</body></html>

1 pkg/web/routerweb/testdata/valid_template.html vendored Normal file
@@ -0,0 +1 @@
<html><head><base href="[[.BaseHref]]" /></head><body>Welcome to test data!!!</body></html>

42 pkg/web/template.go Normal file
@@ -0,0 +1,42 @@
package web

import (
"bytes"
"context"
"log/slog"
"text/template"

"github.com/SigNoz/signoz/pkg/errors"
)

// Field names map to the HTML attributes they populate in the template:
// - BaseHref → <base href="[[.BaseHref]]" />
type TemplateData struct {
BaseHref string
}

// If the template cannot be parsed or executed, the raw bytes are
// returned unchanged and the error is logged.
func NewIndex(ctx context.Context, logger *slog.Logger, name string, raw []byte, data TemplateData) []byte {
result, err := NewIndexE(name, raw, data)
if err != nil {
logger.ErrorContext(ctx, "cannot render index template, serving raw file", slog.String("name", name), errors.Attr(err))
return raw
}

return result
}

func NewIndexE(name string, raw []byte, data TemplateData) ([]byte, error) {
tmpl, err := template.New(name).Delims("[[", "]]").Parse(string(raw))
if err != nil {
return nil, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "cannot parse %q as template", name)
}

var buf bytes.Buffer
if err := tmpl.Execute(&buf, data); err != nil {
return nil, errors.WrapInvalidInputf(err, errors.CodeInvalidInput, "cannot execute template for %q", name)
}

return buf.Bytes(), nil
}

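For reference, a minimal standalone use of NewIndexE from the new file above. Because the helper uses `[[ ]]` delimiters, an index file that contains no such markers renders byte-for-byte unchanged, which is the behavior TestServeNoTemplateIndex exercises:

package main

import (
	"fmt"

	"github.com/SigNoz/signoz/pkg/web"
)

func main() {
	// A template in the same shape as testdata/valid_template.html.
	raw := []byte(`<html><head><base href="[[.BaseHref]]" /></head><body></body></html>`)

	// Render the index with the base href that would come from the
	// external URL's path component.
	rendered, err := web.NewIndexE("index.html", raw, web.TemplateData{BaseHref: "/signoz/"})
	if err != nil {
		panic(err)
	}

	fmt.Println(string(rendered))
	// <html><head><base href="/signoz/" /></head><body></body></html>
}
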