Mirror of https://github.com/SigNoz/signoz.git (synced 2026-02-09 03:02:20 +00:00)
Compare commits
100 Commits
| SHA1 |
|---|
| 2e71230bbf |
| bf2002d6a2 |
| 59749d0576 |
| c9c6ccc687 |
| 88082c1278 |
| 84f150bc18 |
| 299e80ca49 |
| 7127dec6f6 |
| 6afb91fa84 |
| 72f5688194 |
| a118c3c8a1 |
| 9baf873521 |
| 12911db945 |
| bd149f4364 |
| c69b9ae62a |
| bc3f16d3de |
| 61bbd5551b |
| 286577d13d |
| dbd0701779 |
| 0c7a5ce3c7 |
| a92381df1b |
| eb1509d385 |
| 34e33af290 |
| c0004cd51c |
| 10bf545c65 |
| 7d2bcf11c3 |
| 3ff7ace54e |
| abdfe6ccc5 |
| aa398263fb |
| ace02486e0 |
| b318ba6b2f |
| de4be411f4 |
| 362f264bae |
| e94d984cdb |
| bf0267d579 |
| e4b3ea1f34 |
| 4ee6d4b546 |
| a7836c26d0 |
| 15eb5364d5 |
| 47bf512a33 |
| 2776bfa311 |
| 8c7ac88f84 |
| a08ad9e2cf |
| d312398f18 |
| d891c3e118 |
| 1e7b68203f |
| 3d152e23cd |
| 47cf1eebf7 |
| 6c84882dca |
| a4424eca0e |
| 77992a59bc |
| 3cbb071138 |
| 9cd6e5cabe |
| 13bec63fca |
| f2164a1a86 |
| 8a4f58e77b |
| 51a24673b9 |
| c94feb9af2 |
| a8668d19a8 |
| a8e81c9666 |
| 2eed75560d |
| 8d6fb7f897 |
| 4cd0088029 |
| 872c8adbbb |
| bba7344bae |
| 51fe634566 |
| af58d085a0 |
| 5b9b344816 |
| 1caa07e0af |
| ae23cec8d6 |
| 5afc04f205 |
| 6aed23ce66 |
| 007e2e7b78 |
| 762a3cdfcd |
| 308f8f8fed |
| 588bf2b93a |
| fff38b58d2 |
| cbd2036613 |
| 7ef72d4147 |
| 07af5c843a |
| e524ce5743 |
| 24e1346521 |
| 62e77613a6 |
| 56c0265660 |
| 91b1d08dff |
| 239c2cb859 |
| 4173258d0a |
| 1cbbdd8265 |
| 433f3f3d94 |
| fed23a6ab9 |
| b979c24cb4 |
| e4b41b1a27 |
| 44495b7669 |
| cc3133b2d6 |
| 9c83319143 |
| 571c08c58e |
| 092cfc7804 |
| 245050aac2 |
| 606fa6591d |
| a1468cf126 |
.github/ISSUE_TEMPLATE/bug_report.md (new file, 33 lines, vendored)
@@ -0,0 +1,33 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

## Bug description

*Please describe.*
*If this affects the front-end, screenshots would be of great help.*

## Expected behavior

## How to reproduce

1.
2.
3.

## Version information
* **Signoz version**:
* **Browser version**:
* **Your OS and version**:

## Additional context

#### *Thank you* for your bug report – we love squashing them!
.github/ISSUE_TEMPLATE/feature_request.md (new file, 27 lines, vendored)
@@ -0,0 +1,27 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

## Is your feature request related to a problem?

*Please describe.*

## Describe the solution you'd like

## Describe alternatives you've considered

## Additional context
Add any other context or screenshots about the feature request here.

#### *Thank you* for your feature request – we love each and every one!
.github/ISSUE_TEMPLATE/performance-issue-report.md (new file, 33 lines, vendored)
@@ -0,0 +1,33 @@
---
name: Performance issue report
about: Long response times, high resource usage? Ensuring that SigNoz is scalable
  is our top priority
title: ''
labels: ''
assignees: ''

---

## In what situation are you experiencing subpar performance?

*Please describe.*

## How to reproduce

1.
2.
3.

## Your Environment

- [ ] Linux
- [ ] Mac
- [ ] Windows

Please provide details of OS version etc.

## Additional context

#### *Thank you* for your performance issue report – we want SigNoz to be blazing fast!
CONTRIBUTING.md
@@ -1,9 +1,64 @@
# How to Contribute

There are primarily 3 areas in which you can contribute to SigNoz:

- Frontend (written in Typescript, React)
- Query Service (written in Go)
- Flattener Processor (written in Go)

Depending upon your area of expertise & interest, you can choose one or more areas to contribute to. Detailed instructions for each area follow.

# Develop Frontend

You will need to update [https://github.com/SigNoz/signoz/tree/main/frontend](https://github.com/SigNoz/signoz/tree/main/frontend).

### Contribute to Frontend with Docker installation of SigNoz

- `git clone https://github.com/SigNoz/signoz.git && cd signoz`
- Comment out the frontend service section in `deploy/docker/clickhouse-setup/docker-compose.yaml` (around line 38)
- Run `cd deploy && docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d` (this installs SigNoz locally without the frontend service)
- `cd ../frontend` and change baseURL to `http://localhost:8080` in file `src/constants/env.ts`
- `yarn install`
- `yarn dev` (the whole flow is condensed in the sketch below)
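A condensed sketch of the steps above as one shell session (it assumes you have made the two manual edits, to the compose file and to `src/constants/env.ts`, where noted):

    # start every SigNoz service except the frontend
    git clone https://github.com/SigNoz/signoz.git && cd signoz
    # edit deploy/docker/clickhouse-setup/docker-compose.yaml and comment out the frontend service
    (cd deploy && docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d)

    # run the dev frontend against the local query service
    cd frontend
    # set baseURL to http://localhost:8080 in src/constants/env.ts
    yarn install
    yarn dev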
### Contribute to Frontend without installing SigNoz backend

If you don't want to install the SigNoz backend just for frontend development, we can provide you with a test environment to use as the backend. Please ping us in the #contributing channel in our [slack community](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA) and we will DM you a `<test environment URL>`.

- `git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend`
- Change baseURL to `<test environment URL>` in file `src/constants/env.ts`
- `yarn install`
- `yarn dev` (see the sketch after this list)

**_Frontend should now be accessible at `http://localhost:3000/application`_**
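A minimal sketch of that flow (the `<test environment URL>` placeholder stands for whatever URL is shared with you over Slack):

    git clone https://github.com/SigNoz/signoz.git && cd signoz/frontend
    # set baseURL to <test environment URL> in src/constants/env.ts
    yarn install
    yarn dev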
# Contribute to Query-Service

You will need to update [https://github.com/SigNoz/signoz/tree/main/pkg/query-service](https://github.com/SigNoz/signoz/tree/main/pkg/query-service).

### To run ClickHouse setup (recommended for local development)

- `git clone https://github.com/SigNoz/signoz.git && cd signoz/deploy`
- Comment out the frontend service section in `docker/clickhouse-setup/docker-compose.yaml` (around line 38)
- Comment out the query-service section in `docker/clickhouse-setup/docker-compose.yaml` (around line 22)
- Run `docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d` (this installs SigNoz locally without the frontend and query-service)
- `STORAGE=clickhouse ClickHouseUrl=tcp://localhost:9001 go run main.go` (the loop is condensed in the sketch below)

**_Query Service should now be available at `http://localhost:8080`_**

> If you want to see how the frontend plays with the query service, you can also run the frontend in your local env with the baseURL changed to `http://localhost:8080` in file `src/constants/env.ts`, since the query-service is now running on port `8080`.
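The query-service loop above, condensed into one session (a sketch; it assumes Go is installed and that the two services were commented out by hand):

    git clone https://github.com/SigNoz/signoz.git && cd signoz/deploy
    # comment out the frontend and query-service sections in docker/clickhouse-setup/docker-compose.yaml
    docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d

    # run the query service from source against the dockerised ClickHouse (host port 9001)
    cd ../pkg/query-service
    STORAGE=clickhouse ClickHouseUrl=tcp://localhost:9001 go run main.go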
# Contribute to Flattener Processor

It is not needed for the ClickHouse setup.

More info at [https://github.com/SigNoz/signoz/tree/main/pkg/processors/flattener](https://github.com/SigNoz/signoz/tree/main/pkg/processors/flattener).

## General Instructions

You can always reach out to `ankit@signoz.io` to understand more about the repo and product. We are very responsive over email and [slack](https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA).

- You can create a PR (Pull Request)
- If you find any bugs, please create an issue
- If you find anything missing in documentation, you can create an issue with label **documentation**
- If you want to build any new feature, please create an issue with label `enhancement`
- If you want to discuss something about the product, start a new [discussion](https://github.com/SigNoz/signoz/discussions)
README.md
@@ -8,13 +8,13 @@
   <img alt="License" src="https://img.shields.io/badge/license-MIT-brightgreen"> </a>
   <img alt="Downloads" src="https://img.shields.io/docker/pulls/signoz/frontend?label=Downloads"> </a>
   <img alt="GitHub issues" src="https://img.shields.io/github/issues/signoz/signoz"> </a>
-  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNoz_io&hashtags=opensource,signoz,observability">
+  <a href="https://twitter.com/intent/tweet?text=Monitor%20your%20applications%20and%20troubleshoot%20problems%20with%20SigNoz,%20an%20open-source%20alternative%20to%20DataDog,%20NewRelic.&url=https://signoz.io/&via=SigNozHQ&hashtags=opensource,signoz,observability">
   <img alt="tweet" src="https://img.shields.io/twitter/url/http/shields.io.svg?style=social"> </a>
 </p>

 ##

-SigNoz helps developer monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.
+SigNoz helps developers monitor applications and troubleshoot problems in their deployed applications. SigNoz uses distributed tracing to gain visibility into your software stack.

 👉 You can see metrics like p99 latency, error rates for your services, external API calls and individual end points.
deploy/docker/clickhouse-setup/clickhouse-config.xml (new file, 517 lines)
@@ -0,0 +1,517 @@
<?xml version="1.0"?>
<yandex>
    <logger>
        <level>trace</level>
        <log>/var/log/clickhouse-server/clickhouse-server.log</log>
        <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
        <size>1000M</size>
        <count>10</count>
    </logger>

    <http_port>8123</http_port>
    <tcp_port>9000</tcp_port>

    <!-- For HTTPS and SSL over native protocol. -->
    <!--
    <https_port>8443</https_port>
    <tcp_ssl_port>9440</tcp_ssl_port>
    -->

    <!-- Used with https_port and tcp_ssl_port. Full ssl options list: https://github.com/yandex/ClickHouse/blob/master/contrib/libpoco/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
    <openSSL>
        <server> <!-- Used for https server AND secure tcp port -->
            <!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
            <certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
            <privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
            <!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
            <dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
            <verificationMode>none</verificationMode>
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
        </server>

        <client> <!-- Used for connecting to https dictionary source -->
            <loadDefaultCAFile>true</loadDefaultCAFile>
            <cacheSessions>true</cacheSessions>
            <disableProtocols>sslv2,sslv3</disableProtocols>
            <preferServerCiphers>true</preferServerCiphers>
            <!-- Use for self-signed: <verificationMode>none</verificationMode> -->
            <invalidCertificateHandler>
                <!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
                <name>RejectCertificateHandler</name>
            </invalidCertificateHandler>
        </client>
    </openSSL>

    <!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
    <!--
    <http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
    -->

    <!-- Port for communication between replicas. Used for data exchange. -->
    <interserver_http_port>9009</interserver_http_port>

    <!-- Hostname that is used by other replicas to request this server.
         If not specified, then it is determined analogously to the 'hostname -f' command.
         This setting could be used to switch replication to another network interface.
    -->
    <!--
    <interserver_http_host>example.yandex.ru</interserver_http_host>
    -->

    <!-- Listen specified host. Use :: (wildcard IPv6 address) if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
    <listen_host>::</listen_host>
    <!-- Same for hosts with disabled ipv6: -->
    <!-- <listen_host>0.0.0.0</listen_host> -->

    <!-- Default values - try listen localhost on ipv4 and ipv6: -->
    <!-- <listen_host>0.0.0.0</listen_host> -->

    <max_connections>4096</max_connections>
    <keep_alive_timeout>3</keep_alive_timeout>

    <!-- Maximum number of concurrent queries. -->
    <max_concurrent_queries>100</max_concurrent_queries>

    <!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
         the correct maximum value. -->
    <!-- <max_open_files>262144</max_open_files> -->

    <!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
         In bytes. Cache is single for server. Memory is allocated only on demand.
         Cache is used when the 'use_uncompressed_cache' user setting is turned on (off by default).
         Uncompressed cache is advantageous only for very short queries and in rare cases.
    -->
    <uncompressed_cache_size>8589934592</uncompressed_cache_size>

    <!-- Approximate size of mark cache, used in tables of MergeTree family.
         In bytes. Cache is single for server. Memory is allocated only on demand.
         You should not lower this value.
    -->
    <mark_cache_size>5368709120</mark_cache_size>

    <!-- Path to data directory, with trailing slash. -->
    <path>/var/lib/clickhouse/</path>

    <!-- Path to temporary data for processing hard queries. -->
    <tmp_path>/var/lib/clickhouse/tmp/</tmp_path>

    <!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
    <users_config>users.xml</users_config>

    <!-- Default profile of settings. -->
    <default_profile>default</default_profile>

    <!-- Default database. -->
    <default_database>default</default_database>

    <!-- Server time zone could be set here.

         Time zone is used when converting between String and DateTime types,
         when printing DateTime in text formats and parsing DateTime from text,
         and in date and time related functions, if a specific time zone was not passed as an argument.

         Time zone is specified as an identifier from the IANA time zone database, like UTC or Africa/Abidjan.
         If not specified, the system time zone at server startup is used.

         Please note that the server could display a time zone alias instead of the specified name.
         Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
    -->
    <!-- <timezone>Europe/Moscow</timezone> -->

    <!-- You can specify umask here (see "man umask"). Server will apply it on startup.
         Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
    -->
    <!-- <umask>022</umask> -->

    <!-- Configuration of clusters that could be used in Distributed tables.
         https://clickhouse.yandex/reference_en.html#Distributed
    -->
    <remote_servers incl="clickhouse_remote_servers" >
        <!-- Test only shard config for testing distributed storage -->
        <test_shard_localhost>
            <shard>
                <replica>
                    <host>localhost</host>
                    <port>9000</port>
                </replica>
            </shard>
        </test_shard_localhost>
    </remote_servers>

    <!-- If an element has an 'incl' attribute, the corresponding substitution from another file will be used as its value.
         By default, the path to the file with substitutions is /etc/metrika.xml. It could be changed in config in the 'include_from' element.
         Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
    -->

    <!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
         Optional. If you don't use replicated tables, you could omit that.

         See https://clickhouse.yandex/reference_en.html#Data%20replication
    -->
    <zookeeper incl="zookeeper-servers" optional="true" />

    <!-- Substitutions for parameters of replicated tables.
         Optional. If you don't use replicated tables, you could omit that.

         See https://clickhouse.yandex/reference_en.html#Creating%20replicated%20tables
    -->
    <macros incl="macros" optional="true" />

    <!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
    <builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>

    <!-- Maximum session timeout, in seconds. Default: 3600. -->
    <max_session_timeout>3600</max_session_timeout>

    <!-- Default session timeout, in seconds. Default: 60. -->
    <default_session_timeout>60</default_session_timeout>

    <!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
    <!--
        interval - send every X seconds
        root_path - prefix for keys
        hostname_in_path - append hostname to root_path (default = true)
        metrics - send data from table system.metrics
        events - send data from table system.events
        asynchronous_metrics - send data from table system.asynchronous_metrics
    -->
    <!--
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>60</interval>
        <root_path>one_min</root_path>
        <hostname_in_path>true</hostname_in_path>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>true</asynchronous_metrics>
    </graphite>
    <graphite>
        <host>localhost</host>
        <port>42000</port>
        <timeout>0.1</timeout>
        <interval>1</interval>
        <root_path>one_sec</root_path>

        <metrics>true</metrics>
        <events>true</events>
        <asynchronous_metrics>false</asynchronous_metrics>
    </graphite>
    -->

    <!-- Query log. Used only for queries with setting log_queries = 1. -->
    <query_log>
        <!-- What table to insert data into. If the table does not exist, it will be created.
             When the query log structure is changed after a system update,
             the old table will be renamed and a new table will be created automatically.
        -->
        <database>system</database>
        <table>query_log</table>

        <!-- Interval of flushing data. -->
        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </query_log>

    <!-- Uncomment if you use part_log
    <part_log>
        <database>system</database>
        <table>part_log</table>

        <flush_interval_milliseconds>7500</flush_interval_milliseconds>
    </part_log>
    -->

    <!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
         See https://clickhouse.yandex/reference_en.html#Internal%20dictionaries
    -->

    <!-- Path to file with region hierarchy. -->
    <!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->

    <!-- Path to directory with files containing names of regions -->
    <!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->

    <!-- Configuration of external dictionaries. See:
         https://clickhouse.yandex/reference_en.html#External%20Dictionaries
    -->
    <dictionaries_config>*_dictionary.xml</dictionaries_config>

    <!-- Uncomment if you want data to be compressed 30-100% better.
         Don't do that if you just started using ClickHouse.
    -->
    <compression incl="clickhouse_compression">
        <!--
        <!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
        <case>

            <!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
            <min_part_size>10000000000</min_part_size>       <!- - Min part size in bytes. - ->
            <min_part_size_ratio>0.01</min_part_size_ratio>  <!- - Min size of part relative to whole table size. - ->

            <!- - What compression method to use. - ->
            <method>zstd</method>
        </case>
        -->
    </compression>

    <!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
         Works only if ZooKeeper is enabled. Comment it out if such functionality isn't required. -->
    <distributed_ddl>
        <!-- Path in ZooKeeper to queue with DDL queries -->
        <path>/clickhouse/task_queue/ddl</path>
    </distributed_ddl>

    <!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
    <!--
    <merge_tree>
        <max_suspicious_broken_parts>5</max_suspicious_broken_parts>
    </merge_tree>
    -->

    <!-- Protection from accidental DROP.
         If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), then the table cannot be dropped with any DROP query.
         If you want to delete one table and don't want to restart clickhouse-server, you can create the special file <clickhouse-path>/flags/force_drop_table and run DROP once.
         By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows dropping any table.
         Uncomment to disable protection.
    -->
    <!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->

    <!-- Example of parameters for GraphiteMergeTree table engine -->
    <graphite_rollup>
        <!-- carbon -->
        <pattern>
            <regexp>^carbon\.</regexp>
            <function>any</function>
            <retention> <age>0</age> <precision>60</precision> </retention>
            <retention> <age>7776000</age> <precision>3600</precision> </retention>
            <retention> <age>10368000</age> <precision>21600</precision> </retention>
            <retention> <age>34560000</age> <precision>43200</precision> </retention>
            <retention> <age>63072000</age> <precision>86400</precision> </retention>
            <retention> <age>94608000</age> <precision>604800</precision> </retention>
        </pattern>
        <!-- collectd -->
        <pattern>
            <regexp>^collectd\.</regexp>
            <function>any</function>
            <retention> <age>0</age> <precision>10</precision> </retention>
            <retention> <age>43200</age> <precision>60</precision> </retention>
            <retention> <age>864000</age> <precision>900</precision> </retention>
            <retention> <age>1728000</age> <precision>1800</precision> </retention>
            <retention> <age>3456000</age> <precision>3600</precision> </retention>
            <retention> <age>10368000</age> <precision>21600</precision> </retention>
            <retention> <age>34560000</age> <precision>43200</precision> </retention>
            <retention> <age>63072000</age> <precision>86400</precision> </retention>
            <retention> <age>94608000</age> <precision>604800</precision> </retention>
        </pattern>
        <!-- high -->
        <pattern>
            <regexp>^high\.</regexp>
            <function>any</function>
            <retention> <age>0</age> <precision>10</precision> </retention>
            <retention> <age>172800</age> <precision>60</precision> </retention>
            <retention> <age>864000</age> <precision>900</precision> </retention>
            <retention> <age>1728000</age> <precision>1800</precision> </retention>
            <retention> <age>3456000</age> <precision>3600</precision> </retention>
            <retention> <age>10368000</age> <precision>21600</precision> </retention>
            <retention> <age>34560000</age> <precision>43200</precision> </retention>
            <retention> <age>63072000</age> <precision>86400</precision> </retention>
            <retention> <age>94608000</age> <precision>604800</precision> </retention>
        </pattern>
        <!-- medium -->
        <pattern>
            <regexp>^medium\.</regexp>
            <function>any</function>
            <retention> <age>0</age> <precision>60</precision> </retention>
            <retention> <age>864000</age> <precision>900</precision> </retention>
            <retention> <age>1728000</age> <precision>1800</precision> </retention>
            <retention> <age>3456000</age> <precision>3600</precision> </retention>
            <retention> <age>10368000</age> <precision>21600</precision> </retention>
            <retention> <age>34560000</age> <precision>43200</precision> </retention>
            <retention> <age>63072000</age> <precision>86400</precision> </retention>
            <retention> <age>94608000</age> <precision>604800</precision> </retention>
        </pattern>
        <!-- low -->
        <pattern>
            <regexp>^low\.</regexp>
            <function>any</function>
            <retention> <age>0</age> <precision>600</precision> </retention>
            <retention> <age>15552000</age> <precision>1800</precision> </retention>
            <retention> <age>31536000</age> <precision>3600</precision> </retention>
            <retention> <age>63072000</age> <precision>21600</precision> </retention>
            <retention> <age>126144000</age> <precision>43200</precision> </retention>
            <retention> <age>252288000</age> <precision>86400</precision> </retention>
            <retention> <age>315360000</age> <precision>604800</precision> </retention>
        </pattern>
        <!-- default -->
        <default>
            <function>any</function>
            <retention> <age>0</age> <precision>60</precision> </retention>
            <retention> <age>864000</age> <precision>900</precision> </retention>
            <retention> <age>1728000</age> <precision>1800</precision> </retention>
            <retention> <age>3456000</age> <precision>3600</precision> </retention>
            <retention> <age>10368000</age> <precision>21600</precision> </retention>
            <retention> <age>34560000</age> <precision>43200</precision> </retention>
            <retention> <age>63072000</age> <precision>86400</precision> </retention>
            <retention> <age>94608000</age> <precision>604800</precision> </retention>
        </default>
    </graphite_rollup>

    <!-- Directory in <clickhouse-path> containing schema files for various input formats.
         The directory will be created if it doesn't exist.
    -->
    <format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
</yandex>
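With the ports above (`http_port` 8123, `tcp_port` 9000) and the port mappings in the compose file below, a quick smoke test of a running server from the host looks like this (a sketch, assuming the stack is up):

    # the HTTP interface answers "Ok." on its ping endpoint
    curl -s http://localhost:8123/ping

    # run an ad-hoc query over the same HTTP interface
    echo 'SELECT version()' | curl -s 'http://localhost:8123/' --data-binary @-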
deploy/docker/clickhouse-setup/docker-compose.yaml (new file, 97 lines)
@@ -0,0 +1,97 @@
version: "2.4"

services:
  clickhouse:
    image: yandex/clickhouse-server
    expose:
      - 8123
      - 9000
    ports:
      - 9001:9000
      - 8123:8123
    volumes:
      - ./clickhouse-config.xml:/etc/clickhouse-server/config.xml
      - ./docker-entrypoint-initdb.d/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql
    healthcheck:
      # "clickhouse", "client", "-u ${CLICKHOUSE_USER}", "--password ${CLICKHOUSE_PASSWORD}", "-q 'SELECT 1'"
      test: ["CMD", "wget", "--spider", "-q", "localhost:8123/ping"]
      interval: 30s
      timeout: 5s
      retries: 3

  query-service:
    image: signoz/query-service:0.3.1
    container_name: query-service
    ports:
      - "8080:8080"
    environment:
      - ClickHouseUrl=tcp://clickhouse:9000
      - STORAGE=clickhouse
      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
    depends_on:
      clickhouse:
        condition: service_healthy

  frontend:
    image: signoz/frontend:0.3.2
    container_name: frontend
    depends_on:
      - query-service
    links:
      - "query-service"
    ports:
      - "3000:3000"
    volumes:
      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

  otel-collector:
    image: signoz/otelcol:latest
    command: ["--config=/etc/otel-collector-config.yaml", "--mem-ballast-size-mib=683"]
    volumes:
      - ./otel-collector-config.yaml:/etc/otel-collector-config.yaml
    ports:
      - "1777:1777"    # pprof extension
      - "8887:8888"    # Prometheus metrics exposed by the agent
      - "14268:14268"  # Jaeger receiver
      - "55678"        # OpenCensus receiver
      - "55680:55680"  # OTLP HTTP/2.0 legacy port
      - "55681:55681"  # OTLP HTTP/1.0 receiver
      - "4317:4317"    # OTLP gRPC receiver
      - "55679:55679"  # zpages extension
      - "13133"        # health_check
    depends_on:
      clickhouse:
        condition: service_healthy

  hotrod:
    image: jaegertracing/example-hotrod:latest
    container_name: hotrod
    ports:
      - "9000:8080"
    command: ["all"]
    environment:
      - JAEGER_ENDPOINT=http://otel-collector:14268/api/traces

  load-hotrod:
    image: "grubykarol/locust:1.2.3-python3.9-alpine3.12"
    container_name: load-hotrod
    hostname: load-hotrod
    ports:
      - "8089:8089"
    environment:
      ATTACKED_HOST: http://hotrod:8080
      LOCUST_MODE: standalone
      NO_PROXY: standalone
      TASK_DELAY_FROM: 5
      TASK_DELAY_TO: 30
      QUIET_MODE: "${QUIET_MODE:-false}"
      LOCUST_OPTS: "--headless -u 10 -r 1"
    volumes:
      - ../common/locust-scripts:/locust
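Bringing this file up and checking the moving parts can be done as follows (a sketch; host ports are the ones mapped above):

    cd deploy
    sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d

    curl -s http://localhost:8123/ping                                # ClickHouse HTTP interface
    curl -s -o /dev/null -w '%{http_code}\n' http://localhost:3000    # frontend via nginx
    # the HotROD demo app is on http://localhost:9000 and Locust on http://localhost:8089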
deploy/docker/clickhouse-setup/docker-entrypoint-initdb.d/init-db.sql (new file, 27 lines)
@@ -0,0 +1,27 @@
CREATE TABLE IF NOT EXISTS signoz_index (
  timestamp DateTime64(9) CODEC(Delta, ZSTD(1)),
  traceID String CODEC(ZSTD(1)),
  spanID String CODEC(ZSTD(1)),
  parentSpanID String CODEC(ZSTD(1)),
  serviceName LowCardinality(String) CODEC(ZSTD(1)),
  name LowCardinality(String) CODEC(ZSTD(1)),
  kind Int32 CODEC(ZSTD(1)),
  durationNano UInt64 CODEC(ZSTD(1)),
  tags Array(String) CODEC(ZSTD(1)),
  tagsKeys Array(String) CODEC(ZSTD(1)),
  tagsValues Array(String) CODEC(ZSTD(1)),
  statusCode Int64 CODEC(ZSTD(1)),
  references String CODEC(ZSTD(1)),
  externalHttpMethod Nullable(String) CODEC(ZSTD(1)),
  externalHttpUrl Nullable(String) CODEC(ZSTD(1)),
  component Nullable(String) CODEC(ZSTD(1)),
  dbSystem Nullable(String) CODEC(ZSTD(1)),
  dbName Nullable(String) CODEC(ZSTD(1)),
  dbOperation Nullable(String) CODEC(ZSTD(1)),
  peerService Nullable(String) CODEC(ZSTD(1)),
  INDEX idx_tagsKeys tagsKeys TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_tagsValues tagsValues TYPE bloom_filter(0.01) GRANULARITY 64,
  INDEX idx_duration durationNano TYPE minmax GRANULARITY 1
) ENGINE MergeTree()
PARTITION BY toDate(timestamp)
ORDER BY (serviceName, -toUnixTimestamp(timestamp))
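Once spans are flowing, this table can be queried directly. For example, p99 latency per service over the last hour (a sketch, run from the `deploy` directory; `durationNano` is converted to milliseconds):

    sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml exec clickhouse \
      clickhouse-client --query "
        SELECT serviceName,
               quantile(0.99)(durationNano) / 1e6 AS p99_ms,
               count() AS spans
        FROM signoz_index
        WHERE timestamp > now() - INTERVAL 1 HOUR
        GROUP BY serviceName
        ORDER BY p99_ms DESC"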
deploy/docker/clickhouse-setup/otel-collector-config.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
receivers:
  otlp:
    protocols:
      grpc:
      http:
  jaeger:
    protocols:
      grpc:
      thrift_http:
processors:
  batch:
    send_batch_size: 1000
    timeout: 10s
  memory_limiter:
    # Same as --mem-ballast-size-mib CLI argument
    ballast_size_mib: 683
    # 80% of maximum memory up to 2G
    limit_mib: 1500
    # 25% of limit up to 2G
    spike_limit_mib: 512
    check_interval: 5s
  # queued_retry:
  #   num_workers: 4
  #   queue_size: 100
  #   retry_on_failure: true
extensions:
  health_check: {}
  zpages: {}
exporters:
  clickhouse:
    datasource: tcp://clickhouse:9000

service:
  extensions: [health_check, zpages]
  pipelines:
    traces:
      receivers: [jaeger, otlp]
      processors: [batch]
      exporters: [clickhouse]
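Given the extensions and the port mappings in the compose file, the collector can be probed from the host like this (a sketch; the metric name is the collector's standard receiver counter and may differ across versions):

    # zpages: per-receiver trace samples (host port 55679 is mapped 1:1)
    curl -s http://localhost:55679/debug/tracez | head

    # Prometheus metrics (container port 8888 is mapped to host port 8887)
    curl -s http://localhost:8887/metrics | grep otelcol_receiver_accepted_spans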
deploy/docker/common/nginx-config.conf
@@ -1,6 +1,16 @@
 server {
     listen 3000;
     server_name _;

+    gzip on;
+    gzip_static on;
+    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
+    gzip_proxied any;
+    gzip_vary on;
+    gzip_comp_level 6;
+    gzip_buffers 16 8k;
+    gzip_http_version 1.1;

     location / {
         root /usr/share/nginx/html;
         index index.html index.htm;
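One way to confirm the new gzip directives are active (a sketch, assuming the frontend container is serving on port 3000):

    curl -sI -H 'Accept-Encoding: gzip' http://localhost:3000/ | grep -i '^content-encoding'
    # expected: content-encoding: gzip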
deploy/docker/druid-kafka-setup/docker-compose-tiny.yaml
@@ -140,6 +140,11 @@ services:
     env_file:
       - environment_tiny/router
       - environment_tiny/common
+    healthcheck:
+      test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
+      interval: 30s
+      timeout: 5s
+      retries: 5

   flatten-processor:
     image: signoz/flattener-processor:0.2.0
@@ -158,7 +163,7 @@ services:

   query-service:
-    image: signoz.docker.scarf.sh/signoz/query-service:0.2.2
+    image: signoz.docker.scarf.sh/signoz/query-service:0.3.1
     container_name: query-service

     depends_on:
@@ -169,11 +174,15 @@ services:
     environment:
       - DruidClientUrl=http://router:8888
       - DruidDatasource=flattened_spans
       - STORAGE=druid
+      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
+
+    depends_on:
+      router:
+        condition: service_healthy

   frontend:
-    image: signoz/frontend:0.2.3
+    image: signoz/frontend:0.3.2
     container_name: frontend

     depends_on:
@@ -183,7 +192,7 @@ services:
     ports:
       - "3000:3000"
     volumes:
-      - ./nginx-config.conf:/etc/nginx/conf.d/default.conf
+      - ../common/nginx-config.conf:/etc/nginx/conf.d/default.conf

   create-supervisor:
     image: theithollow/hollowapp-blog:curl
@@ -260,5 +269,5 @@ services:
       QUIET_MODE: "${QUIET_MODE:-false}"
       LOCUST_OPTS: "--headless -u 10 -r 1"
     volumes:
-      - ./locust-scripts:/locust
+      - ../common/locust-scripts:/locust
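The healthcheck added above can also be run by hand to see whether the datasource has appeared (a sketch; it assumes the router's port 8888 is reachable from wherever you run it, e.g. from inside the compose network):

    curl -s http://localhost:8888/druid/coordinator/v1/datasources
    # a healthy setup lists "flattened_spans"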
@@ -135,6 +135,11 @@ services:
       - router
     env_file:
       - environment_small/router
+    healthcheck:
+      test: ["CMD", "wget", "--spider", "-q", "http://router:8888/druid/coordinator/v1/datasources/flattened_spans"]
+      interval: 30s
+      timeout: 5s
+      retries: 5

   flatten-processor:
     image: signoz/flattener-processor:0.2.0
@@ -153,7 +158,7 @@ services:

   query-service:
-    image: signoz.docker.scarf.sh/signoz/query-service:0.2.2
+    image: signoz.docker.scarf.sh/signoz/query-service:0.3.1
     container_name: query-service

     depends_on:
@@ -164,11 +169,15 @@ services:
     environment:
       - DruidClientUrl=http://router:8888
       - DruidDatasource=flattened_spans
       - STORAGE=druid
+      - POSTHOG_API_KEY=H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w
+
+    depends_on:
+      router:
+        condition: service_healthy

   frontend:
-    image: signoz/frontend:0.2.3
+    image: signoz/frontend:0.3.2
     container_name: frontend

     depends_on:
deploy/docker/druid-kafka-setup/environment_tiny/common_test (new file, 26 lines)
@@ -0,0 +1,26 @@
# For S3 storage

druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service", "druid-s3-extensions"]

druid_storage_type=s3
druid_storage_bucket=solvzy-test3
druid_storage_baseKey=druid/segments

AWS_ACCESS_KEY_ID=AKIARKCF5OX3CMI3XRXC
AWS_SECRET_ACCESS_KEY=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
AWS_REGION=us-east-2

druid_indexer_logs_type=s3
druid_indexer_logs_s3Bucket=solvzy-test3
druid_indexer_logs_s3Prefix=druid/indexing-logs

# -----------------------------------------------------------
# For local storage
# druid_extensions_loadList=["druid-histogram", "druid-datasketches", "druid-lookups-cached-global", "postgresql-metadata-storage", "druid-kafka-indexing-service"]

# druid_storage_type=local
# druid_storage_storageDirectory=/opt/data/segments
# druid_indexer_logs_type=file
# druid_indexer_logs_directory=/opt/data/indexing-logs
@@ -1,256 +0,0 @@
#!/bin/bash

set -o errexit

is_command_present() {
    type "$1" >/dev/null 2>&1
}

is_mac() {
    [[ $OSTYPE == darwin* ]]
}

check_k8s_setup() {
    echo "Checking your k8s setup status"
    if ! is_command_present kubectl; then
        echo "Please install kubectl on your machine"
        exit 1
    else
        if ! is_command_present jq; then
            install_jq
        fi
        clusters=`kubectl config view -o json | jq -r '."current-context"'`
        if [[ ! -n $clusters ]]; then
            echo "Please setup a k8s cluster & config kubectl to connect to it"
            exit 1
        fi
        k8s_minor_version=`kubectl version --short -o json | jq ."serverVersion.minor" | sed 's/[^0-9]*//g'`
        # if [[ $k8s_minor_version < 18 ]]; then
        #     echo "+++++++++++ ERROR ++++++++++++++++++++++"
        #     echo "SigNoz deployments require Kubernetes >= v1.18. Found version: v1.$k8s_minor_version"
        #     echo "+++++++++++ ++++++++++++++++++++++++++++"
        #     exit 1
        # fi;
    fi
}

install_jq() {
    if [ $package_manager == "brew" ]; then
        brew install jq
    elif [ $package_manager == "yum" ]; then
        yum_cmd="sudo yum --assumeyes --quiet"
        $yum_cmd install jq
    else
        apt_cmd="sudo apt-get --yes --quiet"
        $apt_cmd update
        $apt_cmd install jq
    fi
}

check_os() {
    if is_mac; then
        package_manager="brew"
        desired_os=1
        os="Mac"
        return
    fi

    os_name="$(cat /etc/*-release | awk -F= '$1 == "NAME" { gsub(/"/, ""); print $2; exit }')"

    case "$os_name" in
        Ubuntu*)
            desired_os=1
            os="ubuntu"
            package_manager="apt-get"
            ;;
        Debian*)
            desired_os=1
            os="debian"
            package_manager="apt-get"
            ;;
        Red\ Hat*)
            desired_os=1
            os="red hat"
            package_manager="yum"
            ;;
        CentOS*)
            desired_os=1
            os="centos"
            package_manager="yum"
            ;;
        *)
            desired_os=0
            os="Not Found"
    esac
}

echo_contact_support() {
    echo "Please contact <support@signoz.io> with your OS details and version${1:-.}"
}

bye() {  # Prints a friendly good bye message and exits the script.
    set +o errexit
    echo "Please share your email to receive support with the installation"
    read -rp 'Email: ' email

    while [[ $email == "" ]]
    do
        read -rp 'Email: ' email
    done

    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "platform": "k8s", "k8s_minor_version": "'"$k8s_minor_version"'" } }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi

    echo -e "\nExiting for now. Bye! \U1F44B\n"
    exit 1
}

deploy_app() {
    kubectl apply -f "$install_dir/config-template"
    kubectl apply -f "$install_dir"
}

wait_for_application_start() {
    local timeout=$1
    address=$custom_domain
    if [[ "$ssl_enable" == "true" ]]; then
        protocol="https"
    else
        protocol="http"
    fi
    # The while loop is important because for-loops don't work for dynamic values
    while [[ $timeout -gt 0 ]]; do
        if [[ $address == "" || $address == null ]]; then
            address=`kubectl get ingress appsmith-ingress -o json | jq -r '.status.loadBalancer.ingress[0].ip'`
        fi
        status_code="$(curl -s -o /dev/null -w "%{http_code}" $protocol://$address/api/v1 || true)"
        if [[ $status_code -eq 401 ]]; then
            break
        else
            echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds...\r\c"
        fi
        ((timeout--))
        sleep 1
    done

    echo ""
}

echo -e "👋 Thank you for trying out SigNoz! "
echo ""

# Checking OS and assigning package manager
desired_os=0
os=""
echo -e "🕵️ Detecting your OS"
check_os
SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')

# Run bye if failure happens
trap bye EXIT

DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "platform": "k8s", "k8s_minor_version": "'"$k8s_minor_version"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"

if has_curl; then
    curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
elif has_wget; then
    wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi

# Check for kubernetes setup
check_k8s_setup

echo ""
echo "Deploy SigNoz on your cluster"
echo ""

deploy_app

wait_for_application_start 60

if [[ $status_code -ne 200 ]]; then
    echo "+++++++++++ ERROR ++++++++++++++++++++++"
    echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""
    echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
    echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
    echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
    echo "++++++++++++++++++++++++++++++++++++++++"

    SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"

    DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"

    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "platform": "k8s", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"' } }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi

    exit 1

else
    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"} }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi
    echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
    echo "Your installation is complete!"
    echo ""
    echo "Your frontend is running on 'http://localhost:3000'."
    echo ""
    echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
    echo ""
    echo "Need help Getting Started?"
    echo "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
    echo ""
    echo "Please share your email to receive support & updates about SigNoz!"
    read -rp 'Email: ' email

    while [[ $email == "" ]]
    do
        read -rp 'Email: ' email
    done

    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "platform": "k8s" } }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

    if has_curl; then
        curl -sfL -d "$DATA" --header "$HEADER" "$URL" > /dev/null 2>&1
    elif has_wget; then
        wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
    fi

fi

echo -e "\nThank you!\n"
@@ -2,6 +2,16 @@
|
||||
|
||||
set -o errexit
|
||||
|
||||
# Regular Colors
|
||||
Black='\033[0;30m' # Black
|
||||
Red='\[\e[0;31m\]' # Red
|
||||
Green='\033[0;32m' # Green
|
||||
Yellow='\033[0;33m' # Yellow
|
||||
Blue='\033[0;34m' # Blue
|
||||
Purple='\033[0;35m' # Purple
|
||||
Cyan='\033[0;36m' # Cyan
|
||||
White='\033[0;37m' # White
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
is_command_present() {
|
||||
type "$1" >/dev/null 2>&1
|
||||
@@ -88,7 +98,7 @@ check_os() {
|
||||
# The script should error out in case they aren't available
|
||||
check_ports_occupied() {
|
||||
local port_check_output
|
||||
local ports_pattern="80|443"
|
||||
local ports_pattern="80|3000|8080"
|
||||
|
||||
if is_mac; then
|
||||
port_check_output="$(netstat -anp tcp | awk '$6 == "LISTEN" && $4 ~ /^.*\.('"$ports_pattern"')$/')"
|
||||
@@ -192,7 +202,7 @@ install_docker_compose() {
|
||||
echo ""
|
||||
fi
|
||||
else
|
||||
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found" } }'
|
||||
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker Compose not found", "setup_type": "'"$setup_type"'" } }'
|
||||
URL="https://app.posthog.com/capture"
|
||||
HEADER="Content-Type: application/json"
|
||||
|
||||
@@ -212,8 +222,7 @@ install_docker_compose() {
|
||||
|
||||
start_docker() {
|
||||
echo "Starting Docker ..."
|
||||
if [ $os == "Mac" ]
|
||||
then
|
||||
if [ $os = "Mac" ]; then
|
||||
open --background -a Docker && while ! docker system info > /dev/null 2>&1; do sleep 1; done
|
||||
else
|
||||
if ! sudo systemctl is-active docker.service > /dev/null; then
|
||||
@@ -231,16 +240,17 @@ wait_for_containers_start() {
|
||||
if [[ status_code -eq 200 ]]; then
|
||||
break
|
||||
else
|
||||
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
|
||||
LEN_SUPERVISORS="${#SUPERVISORS}"
|
||||
if [ $setup_type == 'druid' ]; then
|
||||
SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
|
||||
LEN_SUPERVISORS="${#SUPERVISORS}"
|
||||
|
||||
if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
|
||||
echo "No Supervisors found... Re-applying docker compose\n"
|
||||
sudo docker-compose -f ./docker/docker-compose-tiny.yaml up -d
|
||||
if [[ LEN_SUPERVISORS -ne 19 && $timeout -eq 50 ]];then
|
||||
echo -e "\n🟠 Supervisors taking time to start ⏳ ... let's wait for some more time ⏱️\n\n"
|
||||
sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up -d
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds...\r\c"
|
||||
echo -ne "Waiting for all containers to start. This check will timeout in $timeout seconds ...\r\c"
|
||||
fi
|
||||
((timeout--))
|
||||
sleep 1
|
||||
@@ -253,14 +263,18 @@ bye() { # Prints a friendly good bye message and exits the script.
|
||||
if [ "$?" -ne 0 ]; then
|
||||
set +o errexit
|
||||
|
||||
echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
|
||||
echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
|
||||
echo ""
|
||||
echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
|
||||
if [ $setup_type == 'clickhouse' ]; then
|
||||
echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
|
||||
else
|
||||
echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
|
||||
fi
|
||||
# echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
|
||||
echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
|
||||
echo "++++++++++++++++++++++++++++++++++++++++"
|
||||
|
||||
echo "Please share your email to receive support with the installation"
|
||||
echo -e "\n📨 Please share your email to receive support with the installation"
|
||||
read -rp 'Email: ' email
|
||||
|
||||
while [[ $email == "" ]]
|
||||
@@ -268,7 +282,7 @@ bye() { # Prints a friendly good bye message and exits the script.
|
||||
read -rp 'Email: ' email
|
||||
done
|
||||
|
||||
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'" } }'
|
||||
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Support", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
|
||||
URL="https://app.posthog.com/capture"
|
||||
HEADER="Content-Type: application/json"
|
||||
|
||||
@@ -294,17 +308,39 @@ echo ""
|
||||
# Checking OS and assigning package manager
|
||||
desired_os=0
|
||||
os=""
|
||||
echo -e "🕵️ Detecting your OS"
|
||||
echo -e "Detecting your OS ..."
|
||||
check_os
|
||||
|
||||
|
||||
SIGNOZ_INSTALLATION_ID=$(curl -s 'https://api64.ipify.org')
|
||||
|
||||
echo ""
|
||||
|
||||
echo -e "👉 ${RED}Two ways to go forward\n"
|
||||
echo -e "${RED}1) ClickHouse as database (default)\n"
|
||||
echo -e "${RED}2) Kafka + Druid setup to handle scale (recommended for production use)\n"
|
||||
read -p "⚙️ Enter your preference (1/2):" choice_setup
|
||||
|
||||
while [[ $choice_setup != "1" && $choice_setup != "2" && $choice_setup != "" ]]
|
||||
do
|
||||
# echo $choice_setup
|
||||
echo -e "\n❌ ${CYAN}Please enter either 1 or 2"
|
||||
read -p "⚙️ Enter your preference (1/2): " choice_setup
|
||||
# echo $choice_setup
|
||||
done
|
||||
|
||||
if [[ $choice_setup == "1" || $choice_setup == "" ]];then
|
||||
setup_type='clickhouse'
|
||||
else
|
||||
setup_type='druid'
|
||||
fi
|
||||
|
||||
echo -e "\n✅ ${CYAN}You have chosen: ${setup_type} setup\n"
|
||||
|
||||
# Run bye if failure happens
|
||||
trap bye EXIT
|
||||
|
||||
|
||||
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'" } }'
|
||||
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Started", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "setup_type": "'"$setup_type"'" } }'
|
||||
URL="https://app.posthog.com/capture"
|
||||
HEADER="Content-Type: application/json"
|
||||
|
||||
@@ -316,7 +352,7 @@ fi

if [[ $desired_os -eq 0 ]];then
    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported" } }'
    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "OS Not Supported", "setup_type": "'"$setup_type"'" } }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"

@@ -340,7 +376,7 @@ if ! is_command_present docker; then
        echo "Docker Desktop must be installed manually on Mac OS to proceed. Docker can only be installed automatically on Ubuntu / openSUSE / SLES / Redhat / Cent OS"
        echo "https://docs.docker.com/docker-for-mac/install/"
        echo "++++++++++++++++++++++++++++++++++++++++++++++++"
        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed" } }'
        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Docker not installed", "setup_type": "'"$setup_type"'" } }'
        URL="https://app.posthog.com/capture"
        HEADER="Content-Type: application/json"
@@ -358,43 +394,59 @@ if ! is_command_present docker-compose; then
    install_docker_compose
fi

# if ! is_command_present docker-compose; then
#     install_docker_machine
#     docker-machine create -d virtualbox --virtualbox-memory 3584 signoz
# fi

start_docker

# sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up -d --remove-orphans || true

echo ""
echo "Pulling the latest container images for SigNoz. To run as sudo it will ask for system password."
sudo docker-compose -f ./docker/docker-compose-tiny.yaml pull
echo -e "\n🟡 Pulling the latest container images for SigNoz. To run as sudo it may ask for system password\n"
if [ $setup_type == 'clickhouse' ]; then
    sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml pull
else
    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml pull
fi

echo ""
echo "Starting the SigNoz containers. It may take a few minute ..."
echo "🟡 Starting the SigNoz containers. It may take a few minutes ..."
echo
# The docker-compose command does some nasty stuff for the `--detach` functionality. So we add a `|| true` so that the
# script doesn't exit because this command looks like it failed to do its thing.
sudo docker-compose -f ./docker/docker-compose-tiny.yaml up --detach --remove-orphans || true
if [ $setup_type == 'clickhouse' ]; then
    sudo docker-compose -f ./docker/clickhouse-setup/docker-compose.yaml up --detach --remove-orphans || true
else
    sudo docker-compose -f ./docker/druid-kafka-setup/docker-compose-tiny.yaml up --detach --remove-orphans || true
fi

wait_for_containers_start 60
echo ""
if [[ $status_code -ne 200 ]]; then
    echo "+++++++++++ ERROR ++++++++++++++++++++++"
    echo "The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo "🔴 The containers didn't seem to start correctly. Please run the following command to check containers that may have errored out:"
    echo ""
    echo -e "sudo docker-compose -f docker/docker-compose-tiny.yaml ps -a"
    echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker#troubleshooting"
    if [ $setup_type == 'clickhouse' ]; then
        echo -e "sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps -a"
    else
        echo -e "sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml ps -a"
    fi
    echo "Please read our troubleshooting guide https://signoz.io/docs/deployment/docker/#troubleshooting-of-common-issues"
    echo "or reach us on SigNoz for support https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
    echo "++++++++++++++++++++++++++++++++++++++++"

    SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
    if [ $setup_type == 'clickhouse' ]; then
        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "data": "some_checks", "setup_type": "'"$setup_type"'" } }'
    else
        SUPERVISORS="$(curl -so - http://localhost:8888/druid/indexer/v1/supervisor)"
        DATASOURCES="$(curl -so - http://localhost:8888/druid/coordinator/v1/datasources)"
        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"' } }'
        DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Error - Checks", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "error": "Containers not started", "SUPERVISORS": '"$SUPERVISORS"', "DATASOURCES": '"$DATASOURCES"', "setup_type": "'"$setup_type"'" } }'
    fi

    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"
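wait_for_containers_start and the status_code variable it sets are defined earlier in the script, outside this diff. A minimal sketch of the pattern, assuming the function polls the frontend port until it answers or the timeout lapses; the body below is an illustration, not the script's actual implementation:

# Hypothetical sketch; the real function lives outside this hunk.
wait_for_containers_start() {
    local timeout=$1
    while [[ $timeout -gt 0 ]]; do
        # The error branch above reads this same status_code variable.
        status_code=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:3000 || true)
        if [[ $status_code -eq 200 ]]; then
            break
        fi
        sleep 5
        timeout=$((timeout - 5))
    done
}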
@@ -408,7 +460,7 @@ if [[ $status_code -ne 200 ]]; then
    exit 1

else
    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'"} }'
    DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Installation Success", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "setup_type": "'"$setup_type"'" } }'
    URL="https://app.posthog.com/capture"
    HEADER="Content-Type: application/json"
@@ -418,17 +470,25 @@ else
    wget -q --post-data="$DATA" --header="$HEADER" "$URL" > /dev/null 2>&1
fi
echo "++++++++++++++++++ SUCCESS ++++++++++++++++++++++"
echo "Your installation is complete!"
echo ""
echo "Your frontend is running on 'http://localhost:3000'."
echo "🟢 Your installation is complete!"
echo ""
echo -e "🟢 Your frontend is running on http://localhost:3000"
echo ""

if [ $setup_type == 'clickhouse' ]; then
    echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/clickhouse-setup/docker-compose.yaml down -v"
else
    echo "ℹ️ To bring down SigNoz and clean volumes : sudo docker-compose -f docker/druid-kafka-setup/docker-compose-tiny.yaml down -v"
fi

echo ""
echo "+++++++++++++++++++++++++++++++++++++++++++++++++"
echo ""
echo "Need help Getting Started?"
echo "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo "👉 Need help Getting Started?"
echo -e "Join us on Slack https://join.slack.com/t/signoz-community/shared_invite/zt-lrjknbbp-J_mI13rlw8pGF4EWBnorJA"
echo ""
echo "Please share your email to receive support & updates about SigNoz!"
echo -e "\n📨 Please share your email to receive support & updates about SigNoz!"
read -rp 'Email: ' email

while [[ $email == "" ]]
@@ -436,7 +496,7 @@ else
    read -rp 'Email: ' email
done

DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'" } }'
DATA='{ "api_key": "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w", "type": "capture", "event": "Identify Successful Installation", "distinct_id": "'"$SIGNOZ_INSTALLATION_ID"'", "properties": { "os": "'"$os"'", "email": "'"$email"'", "setup_type": "'"$setup_type"'" } }'
URL="https://app.posthog.com/capture"
HEADER="Content-Type: application/json"
@@ -448,28 +508,4 @@ else

fi

echo -e "\nThank you!\n"

##### Changing default memory limit of docker ############
# # Check if memory is less and Confirm to increase size of docker machine
# # https://github.com/docker/machine/releases
# # On OS X
# $ curl -L https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/usr/local/bin/docker-machine && \
#   chmod +x /usr/local/bin/docker-machine
# # On Linux
# $ curl -L https://github.com/docker/machine/releases/download/v0.16.2/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine &&
#   chmod +x /tmp/docker-machine &&
#   sudo cp /tmp/docker-machine /usr/local/bin/docker-machine

# VBoxManage list vms
# docker-machine stop
# VBoxManage modifyvm default --cpus 2
# VBoxManage modifyvm default --memory 4096
# docker-machine start

# VBoxManage showvminfo default | grep Memory
# VBoxManage showvminfo default | grep CPU

echo -e "\n🙏 Thank you!\n"
@@ -5,64 +5,72 @@ metadata:
data:
  supervisor-spec.json: |
    {
      "type": "kafka",
      "dataSchema": {
        "dataSource": "flattened_spans",
        "parser": {
          "type": "string",
          "parseSpec": {
            "format": "json",
            "timestampSpec": {
              "column": "StartTimeUnixNano",
              "format": "nano"
            },
            "dimensionsSpec": {
              "dimensions": [
                "TraceId",
                "SpanId",
                "ParentSpanId",
                "Name",
                "ServiceName",
                "References",
                "Tags",
                {
                  "type": "string",
                  "name": "TagsKeys",
                  "multiValueHandling": "ARRAY"
                },
                {
                  "type": "string",
                  "name": "TagsValues",
                  "multiValueHandling": "ARRAY"
                },
                { "name": "DurationNano", "type": "Long" },
                { "name": "Kind", "type": "int" },
                { "name": "StatusCode", "type": "int" }
              ]
            }
          }
        },
        "metricsSpec" : [
          { "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
        ],
        "granularitySpec": {
          "type": "uniform",
          "segmentGranularity": "DAY",
          "queryGranularity": "NONE",
          "rollup": false
        }
      },
      "tuningConfig": {
        "type": "kafka",
        "reportParseExceptions": true
      },
      "ioConfig": {
        "topic": "flattened_spans",
        "replicas": 1,
        "taskDuration": "PT20M",
        "completionTimeout": "PT30M",
        "consumerProperties": {
          "bootstrap.servers": "signoz-kafka:9092"
        }
      }
    }

  supervisor-spec.json: |
    {
      "type": "kafka",
      "dataSchema": {
        "dataSource": "flattened_spans",
        "parser": {
          "type": "string",
          "parseSpec": {
            "format": "json",
            "timestampSpec": {
              "column": "StartTimeUnixNano",
              "format": "nano"
            },
            "dimensionsSpec": {
              "dimensions": [
                "TraceId",
                "SpanId",
                "ParentSpanId",
                "Name",
                "ServiceName",
                "References",
                "Tags",
                "ExternalHttpMethod",
                "ExternalHttpUrl",
                "Component",
                "DBSystem",
                "DBName",
                "DBOperation",
                "PeerService",
                {
                  "type": "string",
                  "name": "TagsKeys",
                  "multiValueHandling": "ARRAY"
                },
                {
                  "type": "string",
                  "name": "TagsValues",
                  "multiValueHandling": "ARRAY"
                },
                { "name": "DurationNano", "type": "Long" },
                { "name": "Kind", "type": "int" },
                { "name": "StatusCode", "type": "int" }
              ]
            }
          }
        },
        "metricsSpec" : [
          { "type": "quantilesDoublesSketch", "name": "QuantileDuration", "fieldName": "DurationNano" }
        ],
        "granularitySpec": {
          "type": "uniform",
          "segmentGranularity": "DAY",
          "queryGranularity": "NONE",
          "rollup": false
        }
      },
      "tuningConfig": {
        "type": "kafka",
        "reportParseExceptions": true
      },
      "ioConfig": {
        "topic": "flattened_spans",
        "replicas": 1,
        "taskDuration": "PT20M",
        "completionTimeout": "PT30M",
        "consumerProperties": {
          "bootstrap.servers": "signoz-kafka:9092"
        }
      }
    }
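As a hedged aside, a spec like the one above is registered with Druid by POSTing it to the overlord through the router; the endpoint is the same one the install script polls during its health checks, but the exact submission step is an assumption about how this ConfigMap gets applied, not something shown in this diff:

# Submit the Kafka ingestion supervisor spec via the Druid router.
curl -X POST -H 'Content-Type: application/json' \
    -d @supervisor-spec.json \
    http://localhost:8888/druid/indexer/v1/supervisor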
@@ -8,14 +8,14 @@ metadata:
data:
  otel-collector-config: |
    receivers:
      otlp:
        protocols:
          grpc:
          http:
      jaeger:
        protocols:
          grpc:
          thrift_http:
      otlp:
        protocols:
          grpc:
          http:
    processors:
      batch:
        send_batch_size: 1000
@@ -36,9 +36,16 @@ data:
      health_check: {}
      zpages: {}
    exporters:
      kafka:
      kafka/traces:
        brokers:
          - signoz-kafka:9092
        topic: 'otlp_spans'
        protocol_version: 2.0.0

      kafka/metrics:
        brokers:
          - signoz-kafka:9092
        topic: 'otlp_metrics'
        protocol_version: 2.0.0
    service:
      extensions: [health_check, zpages]
@@ -46,8 +53,8 @@ data:
      traces:
        receivers: [jaeger, otlp]
        processors: [memory_limiter, batch, queued_retry]
        exporters: [kafka]
        exporters: [kafka/traces]
      metrics:
        receivers: [otlp]
        processors: [batch]
        exporters: [kafka]
        exporters: [kafka/metrics]
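A hedged smoke test for this config: the health_check extension enabled above exposes an HTTP endpoint, by default on port 13133. Both the port and the otel-collector service name below are assumptions, since neither is set explicitly here:

# Probe the collector's health_check extension.
curl -s http://otel-collector:13133/ && echo "collector is healthy"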
@@ -13,9 +13,9 @@ dependencies:
  version: 0.2.0
- name: query-service
  repository: file://./signoz-charts/query-service
  version: 0.2.2
  version: 0.3.1
- name: frontend
  repository: file://./signoz-charts/frontend
  version: 0.2.3
digest: sha256:31c8e3a8a4c89d0e6071c6687f074e88b3eed8ce86310314e5b6f94e5d5017be
generated: "2021-05-18T16:54:30.24831+05:30"
  version: 0.3.2
digest: sha256:4501accff11231878f58b6d3626263455241ecb8a9d3317d36caa1474cfb36e7
generated: "2021-06-24T23:01:02.327558+05:30"
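The digest and generated fields in this lock file are machine-written rather than hand-edited; a hedged sketch of the usual workflow after bumping the dependency versions above (the chart directory path is illustrative, not taken from this diff):

# Re-resolve chart dependencies and regenerate the lock file.
helm dependency update ./deploy/kubernetes/platform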
@@ -15,12 +15,12 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.2.2
version: 0.3.2

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 0.2.2
appVersion: 0.3.2

dependencies:
- name: zookeeper
@@ -37,7 +37,7 @@ dependencies:
  version: 0.2.0
- name: query-service
  repository: "file://./signoz-charts/query-service"
  version: 0.2.2
  version: 0.3.1
- name: frontend
  repository: "file://./signoz-charts/frontend"
  version: 0.2.3
  version: 0.3.2
@@ -14,8 +14,8 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.2.3
version: 0.3.2

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.2.3
appVersion: 0.3.2
@@ -9,6 +9,16 @@ data:
    server {
        listen {{ .Values.service.port }};
        server_name _;

        gzip on;
        gzip_static on;
        gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
        gzip_proxied any;
        gzip_vary on;
        gzip_comp_level 6;
        gzip_buffers 16 8k;
        gzip_http_version 1.1;

        location / {
            root /usr/share/nginx/html;
            index index.html index.htm;
@@ -14,8 +14,8 @@ type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.2.2
version: 0.3.1

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 0.2.2
appVersion: 0.3.1
@@ -36,7 +36,8 @@ spec:
          value: {{ .Values.configVars.DruidClientUrl }}
        - name: DruidDatasource
          value: {{ .Values.configVars.DruidDatasource }}
        - name: STORAGE
          value: {{ .Values.configVars.STORAGE }}
        # livenessProbe:
        #   httpGet:
@@ -16,6 +16,7 @@ fullnameOverride: ""
configVars:
  DruidClientUrl: http://signoz-druid-router:8888
  DruidDatasource: flattened_spans
  STORAGE: druid
  POSTHOG_API_KEY: "H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w"

@@ -10,6 +10,9 @@ kafka:
  zookeeperConnectionTimeoutMs: 6000

druid:
  image:
    tag: 0.21.1-rc2

  configVars:

  # To store data on local disks attached
@@ -45,3 +48,4 @@ query-service:
  configVars:
    DruidClientUrl: http://signoz-druid-router:8888
    DruidDatasource: flattened_spans
    STORAGE: druid
1
frontend/.nvmrc
Normal file
@@ -0,0 +1 @@
12.13.0
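Together with the engines field added to package.json later in this diff, this pins the expected Node version; a hedged usage sketch, assuming nvm is the reader's version manager:

# nvm picks up 12.13.0 from frontend/.nvmrc automatically.
cd frontend
nvm install
nvm use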
@@ -1,6 +1,15 @@
server {
    listen 3000;
    server_name _;

    gzip on;
    gzip_static on;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
    gzip_proxied any;
    gzip_vary on;
    gzip_comp_level 6;
    gzip_buffers 16 8k;
    gzip_http_version 1.1;

    location / {
        root /usr/share/nginx/html;
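A hedged way to confirm the new gzip settings once the image is rebuilt; localhost:3000 matches the listen directive above:

# Expect a "Content-Encoding: gzip" header in the response.
curl -s -I -H 'Accept-Encoding: gzip' http://localhost:3000/ | grep -i content-encoding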
@@ -9,13 +9,15 @@
    "build": "webpack --config=webpack.config.prod.js",
    "prettify": "prettier --write ."
  },
  "engines": {
    "node": ">=12.13.0"
  },
  "author": "",
  "license": "ISC",
  "dependencies": {
    "@ant-design/icons": "^4.6.2",
    "@auth0/auth0-react": "^1.2.0",
    "@babel/core": "7.12.3",
    "@material-ui/core": "^4.0.0",
    "@pmmmwh/react-refresh-webpack-plugin": "0.4.2",
    "@svgr/webpack": "5.4.0",
    "@testing-library/jest-dom": "^5.11.4",
@@ -48,8 +50,6 @@
    "chart.js": "^2.9.4",
    "css-loader": "4.3.0",
    "d3": "^6.2.0",
    "d3-array": "^2.8.0",
    "d3-ease": "^2.0.0",
    "d3-flame-graph": "^3.1.1",
    "d3-tip": "^0.9.1",
    "dotenv": "8.2.0",
@@ -72,11 +72,7 @@
    "jest-circus": "26.6.0",
    "jest-resolve": "26.6.0",
    "jest-watch-typeahead": "0.6.1",
    "material-ui-chip-input": "^2.0.0-beta.2",
    "mini-css-extract-plugin": "0.11.3",
    "optimize-css-assets-webpack-plugin": "5.0.4",
    "pnp-webpack-plugin": "1.6.4",
    "postcss-flexbugs-fixes": "4.2.1",
    "postcss-loader": "3.0.0",
    "postcss-normalize": "8.0.1",
    "postcss-preset-env": "6.7.0",
@@ -96,7 +92,6 @@
    "react-refresh": "^0.8.3",
    "react-router-dom": "^5.2.0",
    "react-vis": "^1.11.7",
    "recharts": "^1.8.5",
    "redux": "^4.0.5",
    "redux-thunk": "^2.3.0",
    "resolve": "1.18.1",
@@ -140,8 +135,10 @@
    "@babel/preset-env": "^7.12.17",
    "@babel/preset-react": "^7.12.13",
    "@babel/preset-typescript": "^7.12.17",
    "@types/lodash-es": "^4.17.4",
    "autoprefixer": "^9.0.0",
    "babel-plugin-styled-components": "^1.12.0",
    "compression-webpack-plugin": "^8.0.0",
    "copy-webpack-plugin": "^7.0.0",
    "gulp": "^4.0.2",
    "gulp-csso": "^4.0.1",
@@ -151,6 +148,7 @@
    "husky": "4.3.8",
    "less-plugin-npm-import": "^2.1.0",
    "lint-staged": "10.5.3",
    "lodash-es": "^4.17.21",
    "prettier": "2.2.1",
    "react-hot-loader": "^4.13.0",
    "react-is": "^17.0.1",
@@ -1,18 +1,8 @@
@import "~antd/dist/antd.dark.css";
@import "~antd/dist/antd.compact.css";

.ant-space-item {
    margin-right: 0 !important;
}
/* #components-layout-demo-side .logo {
    height: 32px;
    margin: 16px;
    background: rgba(255, 255, 255, 0.3);
}

.site-layout .site-layout-background {
    background: #fff;
} */
.instrument-card {
    border-radius: 4px;
    background: #313131;
@@ -17,6 +17,7 @@ import {
	SettingsPage,
	IntstrumentationPage,
} from "Src/pages";
import { RouteProvider } from "./RouteProvider";

const App = () => {
	const { status } = useThemeSwitcher();
@@ -30,36 +31,45 @@ const App = () => {
	<Suspense fallback={<Spin size="large" />}>
		<Route path={"/"}>
			<Switch>
				<BaseLayout>
					<Route path={ROUTES.SIGN_UP} exact component={Signup} />
					<Route path={ROUTES.APPLICATION} exact component={ServicesTable} />
					<Route path={ROUTES.SERVICE_METRICS} exact component={ServiceMetrics} />
					<Route path={ROUTES.SERVICE_MAP} exact component={ServiceMap} />
					<Route path={ROUTES.TRACES} exact component={TraceDetail} />
					<Route path={ROUTES.TRACE_GRAPH} exact component={TraceGraph} />
					<Route path={ROUTES.SETTINGS} exact component={SettingsPage} />
					<Route
						path={ROUTES.INSTRUMENTATION}
						exact
						component={IntstrumentationPage}
					/>
					<Route
						path={ROUTES.USAGE_EXPLORER}
						exact
						component={UsageExplorer}
					/>
					<Route
						path="/"
						exact
						render={() => {
							return localStorage.getItem(IS_LOGGED_IN) === "yes" ? (
								<Redirect to={ROUTES.APPLICATION} />
							) : (
								<Redirect to={ROUTES.SIGN_UP} />
							);
						}}
					/>
				</BaseLayout>
				<RouteProvider>
					<BaseLayout>
						<Suspense fallback={<Spin size="large" />}>
							<Route path={ROUTES.SIGN_UP} exact component={Signup} />
							<Route path={ROUTES.APPLICATION} exact component={ServicesTable} />
							<Route
								path={ROUTES.SERVICE_METRICS}
								exact
								component={ServiceMetrics}
							/>
							<Route path={ROUTES.SERVICE_MAP} exact component={ServiceMap} />
							<Route path={ROUTES.TRACES} exact component={TraceDetail} />
							<Route path={ROUTES.TRACE_GRAPH} exact component={TraceGraph} />
							<Route path={ROUTES.SETTINGS} exact component={SettingsPage} />
							<Route
								path={ROUTES.INSTRUMENTATION}
								exact
								component={IntstrumentationPage}
							/>
							<Route
								path={ROUTES.USAGE_EXPLORER}
								exact
								component={UsageExplorer}
							/>
							<Route
								path="/"
								exact
								render={() => {
									return localStorage.getItem(IS_LOGGED_IN) === "yes" ? (
										<Redirect to={ROUTES.APPLICATION} />
									) : (
										<Redirect to={ROUTES.SIGN_UP} />
									);
								}}
							/>
						</Suspense>
					</BaseLayout>
				</RouteProvider>
			</Switch>
		</Route>
	</Suspense>
@@ -1,8 +1,11 @@
import React, { ReactNode } from "react";
import React, { ReactNode, useEffect } from "react";

import { Layout } from "antd";
import SideNav from "./Nav/SideNav";
import TopNav from "./Nav/TopNav";
import { useLocation } from "react-router-dom";
import { useRoute } from "./RouteProvider";

const { Content, Footer } = Layout;

interface BaseLayoutProps {
@@ -10,6 +13,13 @@ interface BaseLayoutProps {
}

const BaseLayout: React.FC<BaseLayoutProps> = ({ children }) => {
	const location = useLocation();
	const { dispatch } = useRoute();

	useEffect(() => {
		dispatch({ type: "ROUTE_IS_LOADED", payload: location.pathname });
	}, [location]);

	return (
		<Layout style={{ minHeight: "100vh" }}>
			<SideNav />
|
||||
import styled from "styled-components";
|
||||
import ROUTES from "Src/constants/routes";
|
||||
|
||||
import { metricItem } from "../../store/actions/metrics";
|
||||
import { metricItem } from "../../store/actions/MetricsActions";
|
||||
|
||||
const ChartPopUpUnique = styled.div<{
|
||||
ycoordinate: number;
|
||||
@@ -55,7 +55,7 @@ class ErrorRateChart extends React.Component<ErrorRateChartProps> {
|
||||
xcoordinate: 0,
|
||||
ycoordinate: 0,
|
||||
showpopUp: false,
|
||||
firstpoint_ts: 0
|
||||
firstpoint_ts: 0,
|
||||
// graphInfo:{}
|
||||
};
|
||||
|
||||
|
||||
@@ -1,11 +1,9 @@
|
||||
import React from "react";
|
||||
import { Line as ChartJSLine } from "react-chartjs-2";
|
||||
import { ChartOptions } from "chart.js";
|
||||
import { withRouter } from "react-router";
|
||||
import { RouteComponentProps } from "react-router-dom";
|
||||
import styled from "styled-components";
|
||||
import { getOptions, borderColors } from "./graphConfig";
|
||||
import { externalMetricsItem } from "../../store/actions/metrics";
|
||||
import { externalMetricsItem } from "../../../store/actions/MetricsActions";
|
||||
import { uniqBy, filter } from "lodash";
|
||||
|
||||
const theme = "dark";
|
||||
|
||||
@@ -2,7 +2,7 @@ import React from "react";
|
||||
import { Bar, Line as ChartJSLine } from "react-chartjs-2";
|
||||
import styled from "styled-components";
|
||||
|
||||
import { customMetricsItem } from "../../store/actions/metrics";
|
||||
import { customMetricsItem } from "../../store/actions/MetricsActions";
|
||||
|
||||
const GenVisualizationWrapper = styled.div`
|
||||
height: 160px;
|
||||
|
||||
@@ -4,9 +4,7 @@ import { ChartOptions } from "chart.js";
|
||||
import { withRouter } from "react-router";
|
||||
import { RouteComponentProps } from "react-router-dom";
|
||||
import styled from "styled-components";
|
||||
import ROUTES from "Src/constants/routes";
|
||||
|
||||
import { metricItem } from "../../store/actions/metrics";
|
||||
import { metricItem } from "../../store/actions/MetricsActions";
|
||||
|
||||
const ChartPopUpUnique = styled.div<{
|
||||
ycoordinate: number;
|
||||
@@ -39,11 +37,8 @@ interface LatencyLineChartProps extends RouteComponentProps<any> {
|
||||
popupClickHandler: Function;
|
||||
}
|
||||
|
||||
interface LatencyLineChart {
|
||||
chartRef: any;
|
||||
}
|
||||
|
||||
class LatencyLineChart extends React.Component<LatencyLineChartProps> {
|
||||
private chartRef: React.RefObject<HTMLElement>;
|
||||
constructor(props: LatencyLineChartProps) {
|
||||
super(props);
|
||||
this.chartRef = React.createRef();
|
||||
@@ -54,7 +49,6 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
|
||||
ycoordinate: 0,
|
||||
showpopUp: false,
|
||||
firstpoint_ts: 0,
|
||||
// graphInfo:{}
|
||||
};
|
||||
|
||||
onClickhandler = async (e: any, event: any) => {
|
||||
@@ -69,7 +63,6 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
|
||||
ycoordinate: e.offsetY,
|
||||
showpopUp: true,
|
||||
firstpoint_ts: this.props.data[firstPoint._index].timestamp,
|
||||
// graphInfo:{...event}
|
||||
});
|
||||
} else {
|
||||
// if clicked outside of the graph line, then firstpoint is undefined -> close popup.
|
||||
@@ -80,15 +73,6 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
|
||||
}
|
||||
};
|
||||
|
||||
gotoTracesHandler = (xc: any) => {
|
||||
this.props.history.push(ROUTES.TRACES);
|
||||
};
|
||||
|
||||
gotoAlertsHandler = () => {
|
||||
this.props.history.push(ROUTES.SERVICE_MAP);
|
||||
// PNOTE - Keeping service map for now, will replace with alerts when alert page is made
|
||||
};
|
||||
|
||||
options_charts: ChartOptions = {
|
||||
onClick: this.onClickhandler,
|
||||
|
||||
@@ -161,9 +145,6 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
|
||||
xAxes: [
|
||||
{
|
||||
type: "time",
|
||||
// time: {
|
||||
// unit: 'second'
|
||||
// },
|
||||
distribution: "linear",
|
||||
//'linear': data are spread according to their time (distances can vary)
|
||||
// From https://www.chartjs.org/docs/latest/axes/cartesian/time.html
|
||||
@@ -193,7 +174,6 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
|
||||
>
|
||||
View Traces
|
||||
</PopUpElements>
|
||||
{/* <PopUpElements onClick={this.gotoAlertsHandler}>Set Alerts</PopUpElements> */}
|
||||
</ChartPopUpUnique>
|
||||
);
|
||||
} else return null;
|
||||
@@ -239,7 +219,7 @@ class LatencyLineChart extends React.Component<LatencyLineChartProps> {
|
||||
<div>
|
||||
{this.GraphTracePopUp()}
|
||||
<div>
|
||||
<div style={{textAlign: "center"}}>Application latency in ms</div>
|
||||
<div style={{ textAlign: "center" }}>Application latency in ms</div>
|
||||
<ChartJSLine
|
||||
ref={this.chartRef}
|
||||
data={data_chartJS}
|
||||
|
||||
@@ -5,7 +5,7 @@ import { withRouter } from "react-router";
|
||||
import { RouteComponentProps } from "react-router-dom";
|
||||
import styled from "styled-components";
|
||||
|
||||
import { metricItem } from "../../store/actions/metrics";
|
||||
import { metricItem } from "../../store/actions/MetricsActions";
|
||||
import ROUTES from "Src/constants/routes";
|
||||
|
||||
const ChartPopUpUnique = styled.div<{
|
||||
|
||||
@@ -4,23 +4,24 @@ import { connect } from "react-redux";
|
||||
import { useParams, RouteComponentProps } from "react-router-dom";
|
||||
import { withRouter } from "react-router";
|
||||
import ROUTES from "Src/constants/routes";
|
||||
|
||||
import { GlobalTime, updateTimeInterval } from "Src/store/actions";
|
||||
import {
|
||||
getServicesMetrics,
|
||||
metricItem,
|
||||
getTopEndpoints,
|
||||
getDbOverViewMetrics,
|
||||
getExternalMetrics,
|
||||
externalMetricsAvgDurationItem,
|
||||
externalErrCodeMetricsItem,
|
||||
externalMetricsItem,
|
||||
dbOverviewMetricsItem,
|
||||
topEndpointListItem,
|
||||
} from "../../store/actions/MetricsActions";
|
||||
import {
|
||||
getServicesMetrics,
|
||||
getTopEndpoints,
|
||||
getDbOverViewMetrics,
|
||||
getExternalMetrics,
|
||||
getExternalAvgDurationMetrics,
|
||||
getExternalErrCodeMetrics,
|
||||
topEndpointListItem,
|
||||
GlobalTime,
|
||||
updateTimeInterval,
|
||||
} from "Src/store/actions";
|
||||
} from "../../store/actions/MetricsActions";
|
||||
|
||||
import { StoreState } from "../../store/reducers";
|
||||
import LatencyLineChart from "./LatencyLineChart";
|
||||
import RequestRateChart from "./RequestRateChart";
|
||||
@@ -223,13 +224,13 @@ const mapStateToProps = (
|
||||
globalTime: GlobalTime;
|
||||
} => {
|
||||
return {
|
||||
externalErrCodeMetrics: state.externalErrCodeMetrics,
|
||||
serviceMetrics: state.serviceMetrics,
|
||||
topEndpointsList: state.topEndpointsList,
|
||||
externalMetrics: state.externalMetrics,
|
||||
externalErrCodeMetrics: state.metricsData.externalErrCodeMetricsItem,
|
||||
serviceMetrics: state.metricsData.metricItems,
|
||||
topEndpointsList: state.metricsData.topEndpointListItem,
|
||||
externalMetrics: state.metricsData.externalMetricsItem,
|
||||
globalTime: state.globalTime,
|
||||
dbOverviewMetrics: state.dbOverviewMetrics,
|
||||
externalAvgDurationMetrics: state.externalAvgDurationMetrics,
|
||||
dbOverviewMetrics: state.metricsData.dbOverviewMetricsItem,
|
||||
externalAvgDurationMetrics: state.metricsData.externalMetricsAvgDurationItem,
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@@ -1,17 +1,12 @@
import React, { useEffect, useState } from "react";
import { useLocation } from "react-router-dom";
import { NavLink } from "react-router-dom";
import { Button, Space, Spin, Table } from "antd";
import styled from "styled-components";
import { connect } from "react-redux";
import { SKIP_ONBOARDING } from "Src/constants/onboarding";
import ROUTES from "Src/constants/routes";

import {
	getServicesList,
	GlobalTime,
	servicesListItem,
} from "../../store/actions";
import { getServicesList, GlobalTime } from "../../store/actions";
import { servicesListItem } from "../../store/actions/MetricsActions";
import { StoreState } from "../../store/reducers";
import { CustomModal } from "../../components/Modal";

@@ -75,7 +70,7 @@ const columns = [
	key: "errorRate",
	sorter: (a: any, b: any) => a.errorRate - b.errorRate,
	// sortDirections: ['descend', 'ascend'],
	render: (value: number) => (value).toFixed(2),
	render: (value: number) => value.toFixed(2),
},
{
	title: "Requests Per Second",
@@ -88,8 +83,6 @@
];

const _ServicesTable = (props: ServicesTableProps) => {
	const search = useLocation().search;
	const time_interval = new URLSearchParams(search).get("time");
	const [initialDataFetch, setDataFetched] = useState(false);
	const [errorObject, setErrorObject] = useState({
		message: "",
@@ -210,7 +203,7 @@ const _ServicesTable = (props: ServicesTableProps) => {
const mapStateToProps = (
	state: StoreState,
): { servicesList: servicesListItem[]; globalTime: GlobalTime } => {
	return { servicesList: state.servicesList, globalTime: state.globalTime };
	return {
		servicesList: state.metricsData.serviceList,
		globalTime: state.globalTime,
	};
};

export const ServicesTable = connect(mapStateToProps, {
12
frontend/src/modules/Metrics/TopEndpointsTable.css
Normal file
@@ -0,0 +1,12 @@
@media only screen and (min-width: 768px) {
    .topEndpointsButton {
        white-space: nowrap;
        padding: 0;
    }

    .topEndpointsButton span {
        text-overflow: ellipsis;
        overflow: hidden;
        max-width: 120px;
    }
}
@@ -1,18 +1,22 @@
import React from "react";
import { Table, Button } from "antd";
import { Table, Button, Tooltip } from "antd";
import { connect } from "react-redux";
import styled from "styled-components";
import { useHistory, useParams } from "react-router-dom";
import { topEndpointListItem } from "../../store/actions/metrics";
import { topEndpointListItem } from "../../store/actions/MetricsActions";
import { METRICS_PAGE_QUERY_PARAM } from "Src/constants/query";
import { GlobalTime } from "Src/store/actions";
import { StoreState } from "Src/store/reducers";
import "./TopEndpointsTable.css";

const Wrapper = styled.div`
	padding-top: 10px;
	padding-bottom: 10px;
	padding-left: 20px;
	padding-right: 20px;
	padding-left: 8px;
	padding-right: 8px;
	@media only screen and (max-width: 767px) {
		padding: 0;
	}
	.ant-table table {
		font-size: 12px;
	}
@@ -22,6 +26,9 @@ const Wrapper = styled.div`
	.ant-table-thead > tr > th {
		padding: 10px;
	}
	.ant-table-column-sorters {
		padding: 6px;
	}
`;

interface TopEndpointsTableProps {
@@ -58,9 +65,15 @@ const _TopEndpointsTable = (props: TopEndpointsTableProps) => {
	key: "name",

	render: (text: string) => (
		<Button type="link" onClick={() => handleOnClick(text)}>
			{text}
		</Button>
		<Tooltip placement="topLeft" title={text}>
			<Button
				className="topEndpointsButton"
				type="link"
				onClick={() => handleOnClick(text)}
			>
				{text}
			</Button>
		</Tooltip>
	),
},
{
@@ -72,10 +85,10 @@ const _TopEndpointsTable = (props: TopEndpointsTableProps) => {
	render: (value: number) => (value / 1000000).toFixed(2),
},
{
	title: "P90 (in ms)",
	dataIndex: "p90",
	key: "p90",
	sorter: (a: any, b: any) => a.p90 - b.p90,
	title: "P95 (in ms)",
	dataIndex: "p95",
	key: "p95",
	sorter: (a: any, b: any) => a.p95 - b.p95,
	// sortDirections: ['descend', 'ascend'],
	render: (value: number) => (value / 1000000).toFixed(2),
},
83
frontend/src/modules/RouteProvider.tsx
Normal file
@@ -0,0 +1,83 @@
import React, { useContext, createContext, ReactNode, Dispatch } from "react";
import ROUTES from "Src/constants/routes";

type State = {
	[key: string]: {
		route: string;
		isLoaded: boolean;
	};
};

enum ActionTypes {
	UPDATE_IS_LOADED = "ROUTE_IS_LOADED",
}

type Action = {
	type: ActionTypes;
	payload: string;
};

interface ContextType {
	state: State;
	dispatch: Dispatch<Action>;
}

const RouteContext = createContext<ContextType | null>(null);

interface RouteProviderProps {
	children: ReactNode;
}
interface RouteObj {
	[key: string]: {
		route: string;
		isLoaded: boolean;
	};
}

const updateLocation = (state: State, action: Action): State => {
	if (action.type === ActionTypes.UPDATE_IS_LOADED) {
		/*
			Update the isLoaded property in the routes obj
			if the route matches the current pathname.

			Why: Checkout this issue https://github.com/SigNoz/signoz/issues/110
			To avoid calling the APIs twice for the Date picker,
			we only call them once the route has changed.
		*/
		Object.keys(ROUTES).map((items) => {
			state[items].isLoaded = state[items].route === action.payload;
		});
		return {
			...state,
		};
	}
	return {
		...state,
	};
};

const getInitialState = () => {
	const routes: RouteObj = {};
	Object.keys(ROUTES).map((items) => {
		routes[items] = {
			route: `${ROUTES[items]}`,
			isLoaded: false,
		};
	});
	return routes;
};

const RouteProvider: React.FC<RouteProviderProps> = ({ children }) => {
	const [state, dispatch] = React.useReducer(updateLocation, getInitialState());
	const value = { state, dispatch };
	return <RouteContext.Provider value={value}>{children}</RouteContext.Provider>;
};

const useRoute = (): ContextType => {
	const context = useContext(RouteContext);
	// The context default is null, so guard against null rather than undefined.
	if (context === null) {
		throw new Error("useRoute must be used within a RouteProvider");
	}
	return context as ContextType;
};
export { RouteProvider, useRoute };
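A hedged usage sketch of the new context; real consumers appear in ServiceMap and TraceFilter later in this diff, and both the component name and the import path here are illustrative (call sites in this diff import it relatively):

import React, { useEffect } from "react";
import { useRoute } from "Src/modules/RouteProvider";

// Illustrative consumer: skip fetching until this route has loaded.
const ExampleConsumer: React.FC = () => {
	const { state } = useRoute();

	useEffect(() => {
		if (state.TRACES.isLoaded) {
			// fetch data for the traces page here
		}
	}, [state.TRACES.isLoaded]);

	return null;
};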
@@ -1,4 +1,4 @@
import React, { useEffect, useRef, useState } from "react";
import React, { useEffect, useRef } from "react";
import { connect } from "react-redux";
import { RouteComponentProps } from "react-router-dom";
import {
@@ -14,6 +14,7 @@ import { StoreState } from "../../store/reducers";
import { getZoomPx, getGraphData, getTooltip, transformLabel } from "./utils";
import SelectService from "./SelectService";
import { ForceGraph2D } from "react-force-graph";
import { useRoute } from "../RouteProvider";

const Container = styled.div`
	.force-graph-container .graph-tooltip {
@@ -53,6 +54,8 @@ export interface graphDataType {

const ServiceMap = (props: ServiceMapProps) => {
	const fgRef = useRef();
	const { state } = useRoute();

	const {
		getDetailedServiceMapItems,
		getServiceMapItems,
@@ -61,8 +64,14 @@ const ServiceMap = (props: ServiceMapProps) => {
	} = props;

	useEffect(() => {
		getServiceMapItems(globalTime);
		getDetailedServiceMapItems(globalTime);
		/*
			Call the apis only when the route is loaded.
			Check this issue: https://github.com/SigNoz/signoz/issues/110
		*/
		if (state.SERVICE_MAP.isLoaded) {
			getServiceMapItems(globalTime);
			getDetailedServiceMapItems(globalTime);
		}
	}, [globalTime]);

	useEffect(() => {

@@ -83,6 +83,7 @@ export const getZoomPx = (): number => {
	} else if (width > 1700) {
		return 470;
	}
	return 190;
};

export const getTooltip = (node: {
@@ -1,6 +1,6 @@
import React from "react";
import { Modal, Form, InputNumber, Col, Row } from "antd";
import { Store } from "antd/lib/form/interface";
import { NamePath, Store } from "antd/lib/form/interface";

interface LatencyModalFormProps {
	onCreate: (values: Store) => void; //Store is defined in antd forms library
@@ -13,13 +13,32 @@ const LatencyModalForm: React.FC<LatencyModalFormProps> = ({
	onCancel,
	latencyFilterValues,
}) => {
	const [form] = Form.useForm();

	const validateMinValue = ({ getFieldValue }: { getFieldValue: (name: NamePath) => any }) => ({
		validator(_, value: any) {
			if (value < getFieldValue("max")) {
				return Promise.resolve();
			}
			return Promise.reject(new Error("Min value should be less than Max value"));
		},
	});

	const validateMaxValue = ({ getFieldValue }: { getFieldValue: (name: NamePath) => any }) => ({
		validator(_, value: any) {
			if (value > getFieldValue("min")) {
				return Promise.resolve();
			}
			return Promise.reject(new Error("Max value should be greater than Min value"));
		},
	});

	return (
		<Modal
			visible={true}
			title="Choose min and max values of Latency"
			okText="Apply"
			cancelText="Cancel"
			onCancel={onCancel}
			onOk={() => {
				form
@@ -31,7 +50,7 @@ const LatencyModalForm: React.FC<LatencyModalFormProps> = ({
					.catch((info) => {
						console.log("Validate Failed:", info);
					});
			}}
		>
			<Form
				form={form}
@@ -44,14 +63,19 @@ const LatencyModalForm: React.FC<LatencyModalFormProps> = ({
				<Col span={12}>
					<Form.Item
						name="min"
						label="Min (in ms)"
						rules={[validateMinValue]}
						// rules={[{ required: true, message: 'Please input the title of collection!' }]}
					>
						<InputNumber />
					</Form.Item>
				</Col>
				<Col span={12}>
					<Form.Item name="max" label="Max (in ms)">
					<Form.Item
						name="max"
						label="Max (in ms)"
						rules={[validateMaxValue]}
					>
						<InputNumber />
					</Form.Item>
				</Col>
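One hedged refinement worth noting: antd's documented dependencies prop makes cross-field validators like these re-run automatically when the sibling field changes. The snippet below is a suggestion, not part of this diff:

// Suggested variant: re-validate "min" whenever "max" changes.
<Form.Item
	name="min"
	label="Min (in ms)"
	dependencies={["max"]}
	rules={[validateMinValue]}
>
	<InputNumber />
</Form.Item>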
@@ -1,47 +1,94 @@
import React from "react";
import { Card, Tabs } from "antd";
import { Card, Space, Tabs, Typography } from "antd";
import styled from "styled-components";
import { pushDStree } from "../../store/actions";

const { TabPane } = Tabs;

interface spanTagItem {
	key: string;
	type: string;
	value: string;
}
const { Text } = Typography;

interface SelectedSpanDetailsProps {
	clickedSpanTags: spanTagItem[];
	data: pushDStree;
}

const Title = styled(Text)`
	color: #2d9cdb;
	font-size: 12px;
`;

const SelectedSpanDetails = (props: SelectedSpanDetailsProps) => {
	const callback = (key: any) => {};

	let spanTags = props.data.tags;
	let service = props.data?.name?.split(":")[0];
	let operation = props.data?.name?.split(":")[1];

	return (
		<Card style={{ height: 320 }}>
			<Tabs defaultActiveKey="1" onChange={callback}>
		<Card
			style={{ border: "none", background: "transparent", padding: 0 }}
			bodyStyle={{ padding: 0 }}
		>
			<Space direction="vertical">
				<strong> Details for selected Span </strong>
				<Space direction="vertical" size={2}>
					<Text style={{ marginTop: "18px" }}>Service</Text>
					<Title style={{ color: "#2D9CDB", fontSize: "12px" }}>{service}</Title>
				</Space>
				<Space direction="vertical" size={2}>
					<Text>Operation</Text>
					<Text style={{ color: "#2D9CDB", fontSize: "12px" }}>{operation}</Text>
				</Space>
			</Space>
			<Tabs defaultActiveKey="1">
				<TabPane tab="Tags" key="1">
					<strong> Details for selected Span </strong>
					{props.clickedSpanTags.map((tags, index) => (
						<li
							key={index}
							style={{ color: "grey", fontSize: "13px", listStyle: "none" }}
						>
							<span className="mr-1">{tags.key}</span>:
							<span className="ml-1">
								{tags.key === "error" ? "true" : tags.value}
							</span>
						</li>
					))}{" "}
					{spanTags &&
						spanTags.map((tags, index) => {
							return (
								<>
									{tags.value && (
										<>
											<Text
												style={{
													color: "#BDBDBD",
													fontSize: "12px",
													marginBottom: "8px",
												}}
											>
												{tags.key}
											</Text>
											<div
												style={{
													background: "#4F4F4F",
													color: "#2D9CDB",
													fontSize: "12px",
													padding: "6px 8px",
													wordBreak: "break-all",
													marginBottom: "16px",
												}}
											>
												{tags.key === "error" ? "true" : tags.value}
											</div>
										</>
									)}
								</>
							);
						})}
				</TabPane>
				<TabPane tab="Errors" key="2">
					{props.clickedSpanTags
					{spanTags &&
						spanTags
							.filter((tags) => tags.key === "error")
							.map((error) => (
								<div className="ml-5">
									<p style={{ color: "grey", fontSize: "10px" }}>
										<span className="mr-1">{error.key}</span>:
										<span className="ml-1">true</span>
									</p>
								</div>
								<>
									<Text
										style={{
											color: "#BDBDBD",
											fontSize: "12px",
											marginBottom: "8px",
										}}
									>
										{error.key}
									</Text>
									<div
										style={{
											background: "#4F4F4F",
											color: "#2D9CDB",
											fontSize: "12px",
											padding: "6px 8px",
											wordBreak: "break-all",
											marginBottom: "16px",
										}}
									>
										true
									</div>
								</>
							))}
				</TabPane>
			</Tabs>
@@ -2,15 +2,11 @@ import React, { useState, useEffect } from "react";
|
||||
import GenericVisualizations from "../Metrics/GenericVisualization";
|
||||
import { Select, Card, Space, Form } from "antd";
|
||||
import { connect } from "react-redux";
|
||||
|
||||
import { StoreState } from "../../store/reducers";
|
||||
import {
|
||||
customMetricsItem,
|
||||
getFilteredTraceMetrics,
|
||||
GlobalTime,
|
||||
TraceFilters,
|
||||
} from "../../store/actions";
|
||||
|
||||
import { GlobalTime, TraceFilters } from "../../store/actions";
|
||||
import { useRoute } from "../RouteProvider";
|
||||
import { getFilteredTraceMetrics } from "../../store/actions/MetricsActions";
|
||||
import { customMetricsItem } from "../../store/actions/MetricsActions";
|
||||
const { Option } = Select;
|
||||
|
||||
const entity = [
|
||||
@@ -48,10 +44,10 @@ const aggregation_options = [
|
||||
{
|
||||
linked_entity: "duration",
|
||||
default_selected: { title: "p99", dataindex: "p99" },
|
||||
// options_available: [ {title:'Avg', dataindex:'avg'}, {title:'Max', dataindex:'max'},{title:'Min', dataindex:'min'}, {title:'p50', dataindex:'p50'},{title:'p90', dataindex:'p90'}, {title:'p95', dataindex:'p95'}]
|
||||
// options_available: [ {title:'Avg', dataindex:'avg'}, {title:'Max', dataindex:'max'},{title:'Min', dataindex:'min'}, {title:'p50', dataindex:'p50'},{title:'p95', dataindex:'p95'}, {title:'p95', dataindex:'p95'}]
|
||||
options_available: [
|
||||
{ title: "p50", dataindex: "p50" },
|
||||
{ title: "p90", dataindex: "p90" },
|
||||
{ title: "p95", dataindex: "p95" },
|
||||
{ title: "p99", dataindex: "p99" },
|
||||
],
|
||||
},
|
||||
@@ -80,7 +76,10 @@ interface TraceCustomVisualizationsProps {
|
||||
const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
|
||||
const [selectedEntity, setSelectedEntity] = useState("calls");
|
||||
const [selectedAggOption, setSelectedAggOption] = useState("count");
|
||||
const [selectedStep, setSelectedStep] = useState("60");
|
||||
const { state } = useRoute();
|
||||
const [form] = Form.useForm();
|
||||
const selectedStep = "60";
|
||||
|
||||
// Step should be multiples of 60, 60 -> 1 min
|
||||
|
||||
useEffect(() => {
|
||||
@@ -109,21 +108,18 @@ const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
|
||||
minTime: props.globalTime.minTime - 15 * 60 * 1000000000,
|
||||
maxTime: props.globalTime.maxTime + 15 * 60 * 1000000000,
|
||||
};
|
||||
props.getFilteredTraceMetrics(request_string, plusMinus15);
|
||||
|
||||
/*
|
||||
Call the apis only when the route is loaded.
|
||||
Check this issue: https://github.com/SigNoz/signoz/issues/110
|
||||
*/
|
||||
if (state.TRACES.isLoaded) {
|
||||
props.getFilteredTraceMetrics(request_string, plusMinus15);
|
||||
}
|
||||
}, [selectedEntity, selectedAggOption, props.traceFilters, props.globalTime]);
|
||||
|
||||
//Custom metrics API called if time, tracefilters, selected entity or agg option changes
|
||||
|
||||
const [form] = Form.useForm();
|
||||
|
||||
function handleChange(value: string) {
|
||||
// console.log(value);
|
||||
}
|
||||
|
||||
function handleFinish(value: string) {
|
||||
// console.log(value);
|
||||
}
|
||||
|
||||
// PNOTE - Can also use 'coordinate' option in antd Select for implementing this - https://ant.design/components/select/
|
||||
const handleFormValuesChange = (changedValues: any) => {
|
||||
const formFieldName = Object.keys(changedValues)[0];
|
||||
@@ -152,11 +148,9 @@ const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
|
||||
|
||||
return (
|
||||
<Card>
|
||||
{/* <Space direction="vertical"> */}
|
||||
<div>Custom Visualizations</div>
|
||||
<Form
|
||||
form={form}
|
||||
onFinish={handleFinish}
|
||||
onValuesChange={handleFormValuesChange}
|
||||
initialValues={{
|
||||
agg_options: "Count",
|
||||
@@ -189,7 +183,7 @@ const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
|
||||
</Form.Item>
|
||||
|
||||
<Form.Item name="chart_style">
|
||||
<Select style={{ width: 120 }} onChange={handleChange} allowClear>
|
||||
<Select style={{ width: 120 }} allowClear>
|
||||
<Option value="line">Line Chart</Option>
|
||||
<Option value="bar">Bar Chart</Option>
|
||||
<Option value="area">Area Chart</Option>
|
||||
@@ -197,7 +191,7 @@ const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
|
||||
</Form.Item>
|
||||
|
||||
<Form.Item name="interval">
|
||||
<Select style={{ width: 120 }} onChange={handleChange} allowClear>
|
||||
<Select style={{ width: 120 }} allowClear>
|
||||
<Option value="1m">1 min</Option>
|
||||
<Option value="5m">5 min</Option>
|
||||
<Option value="30m">30 min</Option>
|
||||
@@ -206,7 +200,7 @@ const _TraceCustomVisualizations = (props: TraceCustomVisualizationsProps) => {
|
||||
|
||||
{/* Need heading for each option */}
|
||||
<Form.Item name="group_by">
|
||||
<Select style={{ width: 120 }} onChange={handleChange} allowClear>
|
||||
<Select style={{ width: 120 }} allowClear>
|
||||
<Option value="none">Group By</Option>
|
||||
<Option value="status">Status Code</Option>
|
||||
<Option value="protocol">Protocol</Option>
|
||||
@@ -229,7 +223,7 @@ const mapStateToProps = (
|
||||
traceFilters: TraceFilters;
|
||||
} => {
|
||||
return {
|
||||
filteredTraceMetrics: state.filteredTraceMetrics,
|
||||
filteredTraceMetrics: state.metricsData.customMetricsItem,
|
||||
globalTime: state.globalTime,
|
||||
traceFilters: state.traceFilters,
|
||||
};
|
||||
|
||||
@@ -18,6 +18,7 @@ import FormItem from "antd/lib/form/FormItem";
|
||||
import api, { apiV1 } from "../../api";
|
||||
import { useLocation } from "react-router-dom";
|
||||
import { METRICS_PAGE_QUERY_PARAM } from "Src/constants/query";
|
||||
import { useRoute } from "../RouteProvider";
|
||||
|
||||
const { Option } = Select;
|
||||
|
||||
@@ -45,6 +46,7 @@ const _TraceFilter = (props: TraceFilterProps) => {
|
||||
const [tagKeyOptions, setTagKeyOptions] = useState<TagKeyOptionItem[]>([]);
|
||||
const location = useLocation();
|
||||
const urlParams = new URLSearchParams(location.search.split("?")[1]);
|
||||
const { state } = useRoute();
|
||||
|
||||
useEffect(() => {
|
||||
handleApplyFilterForm({
|
||||
@@ -122,7 +124,13 @@ const _TraceFilter = (props: TraceFilterProps) => {
|
||||
"&tags=" +
|
||||
encodeURIComponent(JSON.stringify(props.traceFilters.tags));
|
||||
|
||||
props.fetchTraces(props.globalTime, request_string);
|
||||
/*
|
||||
Call the apis only when the route is loaded.
|
||||
Check this issue: https://github.com/SigNoz/signoz/issues/110
|
||||
*/
|
||||
if (state.TRACES.isLoaded) {
|
||||
props.fetchTraces(props.globalTime, request_string);
|
||||
}
|
||||
}, [props.traceFilters, props.globalTime]);
|
||||
|
||||
useEffect(() => {
|
||||
|
||||
41
frontend/src/modules/Traces/TraceGantChartHelpers.js
Normal file
@@ -0,0 +1,41 @@
// Doing DFS traversal on the tree
// resultCount : how many entries you want, where -1 means all possible entries.
// callback(obj) : takes one element of the data structure and returns true if it should be selected

// program to implement stack data structure
import { isEmpty } from "lodash-es";

const getTreeData = (tree, callback, resultCount = -1) => {
	if (resultCount === 0 || isEmpty(tree) || tree.id === "empty") return null;

	let data = tree;
	let result = [];
	let stk = [];
	stk.push(data);

	while (!isEmpty(stk)) {
		let x = stk[stk.length - 1];

		// marked means seeing the node for the second time.
		if (x.marked) {
			delete x.marked;
			stk.pop();
			x.map((item) => {
				if (callback(item) === true) {
					result.push(item);
					if (resultCount !== -1 && result.length === resultCount) return result;
				}
			});
		} else {
			x.marked = true;
			x.map((item) => {
				if (item.children.length > 0) {
					stk.push(item.children);
				}
			});
		}
	}
	return result;
};

export default getTreeData;
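A hedged usage sketch of the helper; the span shape follows pushDStree as used elsewhere in this diff, but the data itself is made up for illustration:

import getTreeData from "Src/modules/Traces/TraceGantChartHelpers";

// Hypothetical two-span trace; collect every span tagged as an error.
const tree = [
	{
		id: "root",
		tags: [{ key: "error", value: "true" }],
		children: [{ id: "child", tags: [], children: [] }],
	},
];

const errorSpans = getTreeData(
	tree,
	(span) => span.tags.some((tag) => tag.key === "error"),
	-1, // -1 means return all matches
);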
13
frontend/src/modules/Traces/TraceGanttChart.css
Normal file
@@ -0,0 +1,13 @@
.row-styles {
    cursor: pointer;
}
.hide {
    display: none;
}
.ant-tabs-nav-list {
    justify-content: space-between;
    width: 100%;
}
.ant-table-body table {
    margin-bottom: 64px;
}
335
frontend/src/modules/Traces/TraceGanttChart.tsx
Normal file
335
frontend/src/modules/Traces/TraceGanttChart.tsx
Normal file
@@ -0,0 +1,335 @@
|
||||
import React, { useEffect, useRef, useState } from "react";
import { Table, Progress, Tabs, Button, Row, Col } from "antd";
import "./TraceGanttChart.css";
import { max, isEmpty, has } from "lodash-es";
import styled from "styled-components";
import getTreeData from "Src/modules/Traces/TraceGantChartHelpers";
import { pushDStree } from "../../store/actions";

const { TabPane } = Tabs;

const StyledButton = styled(Button)`
	border: 1px solid #e0e0e0;
	border-radius: 4px;
	color: #f2f2f2;
	font-size: 14px;
	line-height: 20px;
`;

interface TraceGanttChartProps {
	treeData: pushDStree;
	clickedSpan: pushDStree;
	selectedSpan: pushDStree;
	resetZoom: (value: boolean) => void;
	setSpanTagsInfo: (span: { data: pushDStree }) => void;
}

const TraceGanttChart = ({
	treeData,
	clickedSpan,
	selectedSpan,
	resetZoom,
	setSpanTagsInfo,
}: TraceGanttChartProps) => {
	let checkStrictly = true;
	const [selectedRows, setSelectedRows] = useState([]);
	const [clickedSpanData, setClickedSpanData] = useState(clickedSpan);
	const [defaultExpandedRows, setDefaultExpandedRows] = useState([]);
	const [sortedTreeData, setSortedTreeData] = useState(treeData);
	const [isReset, setIsReset] = useState(false);
	const [rowId, setRowId] = useState(0);
	const [tabsContainerWidth, setTabsContainerWidth] = useState(0);
	const tableRef = useRef("");
	let tabsContainer = document.querySelector(
		"#collapsable .ant-tabs-nav-list",
	);

	let tabs = document.querySelectorAll("#collapsable .ant-tabs-tab");

	const { id } = treeData || "id";
	let maxGlobal = 0;
	let minGlobal = 0;
	let medianGlobal = 0;
	let endTimeArray: number[] = [];

	useEffect(() => {
		if (id !== "empty") {
			setSortedTreeData(treeData);
			if (clickedSpan) {
				setClickedSpanData(clickedSpan);
			}
			setTabsContainerWidth(tabsContainer?.offsetWidth);
		}
		// handleScroll(selectedSpan?.id);
	}, [sortedTreeData, treeData, clickedSpan]);

	useEffect(() => {
		if (
			!isEmpty(clickedSpanData) &&
			clickedSpan &&
			!selectedRows.includes(clickedSpan.id) &&
			!isReset
		) {
			setSelectedRows([clickedSpan.id]);
			getParentKeys(clickedSpan);
			let keys = [clickedSpan?.id, ...parentKeys];
			// setDefaultExpandedRows(keys)
			handleFocusOnSelectedPath("", [clickedSpan.id], clickedSpan);
		}
	}, [clickedSpan, selectedRows, isReset, clickedSpanData]);

	let parentKeys = [];
	let childrenKeys = [];
	const getParentKeys = (obj) => {
		if (has(obj, "parent")) {
			parentKeys.push(obj.parent.id);
			getParentKeys(obj.parent);
		}
	};

	const getChildrenKeys = (obj) => {
		if (has(obj, "children")) {
			childrenKeys.push(obj.id);
			if (!isEmpty(obj.children)) {
				obj.children.map((item) => {
					getChildrenKeys(item);
				});
			}
		}
	};

	useEffect(() => {
		if (!isEmpty(selectedSpan) && isEmpty(clickedSpan)) {
			getParentKeys(selectedSpan);
			let keys = [selectedSpan?.id, ...parentKeys];
			setDefaultExpandedRows(keys);
			setSelectedRows([selectedSpan.id, clickedSpan]);
			// setSpanTagsInfo({ data: selectedSpan })
		} else {
			setSelectedRows([treeData?.[0]?.id]);
			setDefaultExpandedRows([treeData?.[0]?.id]);
			// setSpanTagsInfo({ data: treeData?.[0] })
		}
	}, [selectedSpan, treeData]);

	const getMaxEndTime = (treeData) => {
		if (treeData.length > 0) {
			if (treeData?.id !== "empty") {
				return Array.from(treeData).map((item, key) => {
					if (!isEmpty(item.children)) {
						endTimeArray.push(item.time / 1000000 + item.startTime);
						getMaxEndTime(item.children);
					} else {
						endTimeArray.push(item.time / 1000000 + item.startTime);
					}
				});
			}
		}
	};

	if (id !== "empty") {
		getMaxEndTime(treeData);
		maxGlobal = max(endTimeArray);
		minGlobal = treeData?.[0]?.startTime;
		medianGlobal = (minGlobal + maxGlobal) / 2;
	}

	/*
		timeDiff = startTime - minGlobal (offset of the span from the trace start)
		totalTime = maxGlobal - minGlobal
		totalWidth = width of container
	*/
	const getPaddingLeft = (timeDiff, totalTime, totalWidth) => {
		return ((timeDiff / totalTime) * totalWidth).toFixed(0);
	};

	let tabMinVal = 0;
	let tabMedianVal = (medianGlobal - minGlobal).toFixed(0);
	let tabMaxVal = (maxGlobal - minGlobal).toFixed(0);

	const columns = [
		{
			title: "",
			dataIndex: "name",
			key: "name",
		},
		{
			title: (
				<Tabs>
					<TabPane tab={tabMinVal + "ms"} key="1" />
					<TabPane tab={tabMedianVal + "ms"} key="2" />
					<TabPane tab={tabMaxVal + "ms"} key="3" />
				</Tabs>
			),
			dataIndex: "trace",
			name: "trace",
			render: (_, record: pushDStree) => {
				let widths = [];
				let length;

				if (widths.length < tabs.length) {
					Array.from(tabs).map((tab) => {
						widths.push(tab.offsetWidth);
					});
				}

				let paddingLeft = 0;
				let startTime = parseInt(record.startTime);
				let duration = parseInt((record.time / 1000000).toFixed(2));
				paddingLeft = parseInt(
					getPaddingLeft(
						startTime - minGlobal,
						maxGlobal - minGlobal,
						tabsContainerWidth,
					),
				);
				let textPadding = paddingLeft;
				if (paddingLeft === tabsContainerWidth - 20) {
					textPadding = tabsContainerWidth - 40;
				}
				length = ((duration / (maxGlobal - startTime)) * 100).toFixed(2);

				return (
					<>
						<div style={{ paddingLeft: textPadding + "px" }}>{duration}ms</div>
						<Progress
							percent={length}
							showInfo={false}
							style={{ paddingLeft: paddingLeft + "px" }}
						/>
					</>
				);
			},
		},
	];

	const handleFocusOnSelectedPath = (event, selectedRowsList = selectedRows) => {
		if (!isEmpty(selectedRowsList)) {
			let node: pushDStree = getTreeData(
				treeData,
				(item: pushDStree) => item.id === selectedRowsList[0],
				1,
			);
			setSpanTagsInfo({ data: node[0] });

			getParentKeys(node[0]);
			getChildrenKeys(node[0]);

			let rows = document.querySelectorAll("#collapsable table tbody tr");
			Array.from(rows).map((row) => {
				let attribKey = row.getAttribute("data-row-key");
				if (!selectedRowsList.includes(attribKey)) {
					row.classList.add("hide");
				}
			});
			setDefaultExpandedRows([...parentKeys, ...childrenKeys]);
		}
	};

	const handleResetFocus = () => {
		let rows = document.querySelectorAll("#collapsable table tbody tr");
		Array.from(rows).map((row) => {
			row.classList.remove("hide");
		});

		resetZoom(true);
	};

	const handleScroll = (id) => {
		let rows = document.querySelectorAll("#collapsable table tbody tr");
		const table = document.querySelectorAll("#collapsable table");
		Array.from(rows).map((row) => {
			let attribKey = row.getAttribute("data-row-key");
			if (id === attribKey) {
				let scrollValue = row.offsetTop;
				table[1].scrollTop = scrollValue;
			}
		});
	};

	const rowSelection = {
		onChange: (selectedRowKeys: []) => {
			setSelectedRows(selectedRowKeys);
			setClickedSpanData({});
			if (isEmpty(selectedRowKeys)) {
				setIsReset(true);
			} else {
				setIsReset(false);
			}
		},
		onSelect: (record) => {
			handleRowOnClick(record);
		},
		selectedRowKeys: selectedRows,
	};

	const handleRowOnClick = (record) => {
		setRowId(record.id);

		let node: pushDStree = getTreeData(
			treeData,
			(item: pushDStree) => item.id === record.id,
			1,
		);
		setSpanTagsInfo({ data: node[0] });

		const selectedRowKeys = selectedRows;
		if (selectedRowKeys.indexOf(record.id) >= 0) {
			selectedRowKeys.splice(selectedRowKeys.indexOf(record.key), 1);
		} else {
			selectedRowKeys.push(record.id);
		}
		setSelectedRows([record.id]);
	};

	const handleOnExpandedRowsChange = (item) => {
		setDefaultExpandedRows(item);
	};

	return (
		<>
			{id !== "empty" && (
				<>
					<Row
						justify="end"
						gutter={32}
						style={{
							marginBottom: "24px",
						}}
					>
						<Col>
							<StyledButton onClick={handleFocusOnSelectedPath}>
								{" "}
								Focus on selected path{" "}
							</StyledButton>
						</Col>
						<Col>
							<StyledButton onClick={handleResetFocus}> Reset Focus </StyledButton>
						</Col>
					</Row>

					<Table
						refs={tableRef}
						hideSelectAll={true}
						columns={columns}
						rowSelection={{ ...rowSelection, checkStrictly, type: "radio" }}
						dataSource={sortedTreeData}
						rowKey="id"
						sticky={true}
						onRow={(record, rowIndex) => {
							return {
								onClick: () => handleRowOnClick(record, rowIndex), // click row
							};
						}}
						expandedRowKeys={defaultExpandedRows}
						onExpandedRowsChange={handleOnExpandedRowsChange}
						pagination={false}
						scroll={{ y: 540 }}
						rowClassName="row-styles"
						filterMultiple={false}
					/>
				</>
			)}
		</>
	);
};

export default TraceGanttChart;
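To make the bar-positioning math above concrete, here is a worked example with made-up numbers: a trace spanning 1,000 ms rendered in a 500 px wide tabs container.

// minGlobal = 0 ms (trace start), maxGlobal = 1000 ms (latest span end)
// A span starting at startTime = 250 ms:
// getPaddingLeft(250 - 0, 1000 - 0, 500) === "125"
// so its Gantt bar is indented 125 px, a quarter of the way across the track.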
@@ -36,10 +36,16 @@
	stroke-linecap: round;
	stroke-linejoin: round;
}

/* Prevent text vertical shift on hover */
.d3-flame-graph-label {
	border: 1px dotted transparent;
	cursor: pointer;
}

/* Transparency simulates a sub-pixel border: https://stackoverflow.com/questions/13891177/css-border-less-than-1px */

.d3-flame-graph-label:hover {
	border: 1px dotted;
	border-color: rgba(255, 255, 255, 0.75);
}
/*
@@ -47,3 +53,7 @@
	border: 1px solid;
	border-color: rgba(255, 255, 255, 0.75);
} */

.fade:not(.show) {
	opacity: 0.5;
}
@@ -1,30 +1,65 @@
import React, { useEffect, useState } from "react";
import { useParams } from "react-router-dom";
import { useParams, useLocation } from "react-router-dom";
import { flamegraph } from "d3-flame-graph";
import { connect } from "react-redux";
import { Card, Button, Row, Col, Space } from "antd";
import { Card, Row, Col, Space, Affix } from "antd";
import * as d3 from "d3";
import * as d3Tip from "d3-tip";

//import * as d3Tip from 'd3-tip';
// PNOTE - uninstall @types/d3-tip. issues with importing d3-tip https://github.com/Caged/d3-tip/issues/181

import "./TraceGraph.css";
import { spanToTreeUtil } from "../../utils/spanToTree";
import { fetchTraceItem, spansWSameTraceIDResponse } from "../../store/actions";
import {
	fetchTraceItem,
	pushDStree,
	spansWSameTraceIDResponse,
} from "../../store/actions";
import { StoreState } from "../../store/reducers";
import { TraceGraphColumn } from "./TraceGraphColumn";
import SelectedSpanDetails from "./SelectedSpanDetails";
import TraceGanttChart from "./TraceGanttChart";
import styled from "styled-components";
import { isEmpty, sortBy } from "lodash-es";

interface TraceGraphProps {
	traceItem: spansWSameTraceIDResponse;
	fetchTraceItem: Function;
}

const TraceGanttChartContainer = styled(Card)`
	background: #333333;
	border-radius: 5px;
`;

const _TraceGraph = (props: TraceGraphProps) => {
	let location = useLocation();
	const spanId = location?.state?.spanId;
	const params = useParams<{ id?: string }>();
	const [clickedSpanTags, setClickedSpanTags] = useState([]);
	const [clickedSpanTags, setClickedSpanTags] = useState<pushDStree>([]);
	const [selectedSpan, setSelectedSpan] = useState({});
	const [clickedSpan, setClickedSpan] = useState(null);
	const [resetZoom, setResetZoom] = useState(false);
	const [sortedTreeData, setSortedTreeData] = useState<pushDStree>([]);

	let sortedData = {};

	const getSortedData = (treeData: [pushDStree], parent = {}) => {
		if (!isEmpty(treeData)) {
			if (treeData[0].id !== "empty") {
				return Array.from(treeData).map((item, key) => {
					if (!isEmpty(item.children)) {
						getSortedData(item.children, item);
						sortedData = sortBy(item.children, (i) => i.startTime);
						treeData[key].children = sortedData;
					}
					if (!isEmpty(parent)) {
						treeData[key].parent = parent;
					}
					return treeData;
				});
			}
			return treeData;
		}
	};

	const tree = spanToTreeUtil(props.traceItem[0].events);

	useEffect(() => {
		//sets span width based on value - which is mapped to duration
@@ -32,16 +67,31 @@ const _TraceGraph = (props: TraceGraphProps) => {
	}, []);

	useEffect(() => {
		if (props.traceItem || resetZoom) {
			const tree = spanToTreeUtil(props.traceItem[0].events);
		if (props.traceItem) {
			let sortedData = getSortedData([tree]);
			setSortedTreeData(sortedData?.[0]);
			getSpanInfo(sortedData?.[0], spanId);
			// This is causing element to change ref. Can use both useRef or this approach.
			d3.select("#chart").datum(tree).call(chart);
			setResetZoom(false);
			d3.select("#chart").datum(tree).call(chart).sort((item) => item.startTime);
		}
	}, [props.traceItem, resetZoom]);
	}, [props.traceItem]);
	// if this monitoring of props.traceItem.data is removed then zoom on click doesn't work
	// Doesn't work if we only do an initial check; works if we monitor an element, as it may get updated later

	useEffect(() => {
		if (!isEmpty(sortedTreeData) && sortedTreeData?.id !== "empty" && isEmpty(clickedSpanTags)) {
			setClickedSpanTags(sortedTreeData?.[0]);
		}
	}, [sortedTreeData]);

	useEffect(() => {
		if (resetZoom) {
			// This is causing element to change ref. Can use both useRef or this approach.
			d3.select("#chart").datum(tree).call(chart).sort((item) => item.startTime);
			setResetZoom(false);
		}
	}, [resetZoom]);

	const tip = d3Tip
		.default()
		.attr("class", "d3-tip")
@@ -50,34 +100,59 @@ const _TraceGraph = (props: TraceGraphProps) => {
	});

	const onClick = (z: any) => {
		setClickedSpanTags(z.data.tags);
		setClickedSpanTags(z.data);
		setClickedSpan(z.data);
		setSelectedSpan([]);
		console.log(`Clicked on ${z.data.name}, id: "${z.id}"`);
	};

	const setSpanTagsInfo = (z: any) => {
		setClickedSpanTags(z.data);
	};

	const getSpanInfo = (data: [pushDStree], spanId: string): void => {
		if (resetZoom) {
			setSelectedSpan({});
			return;
		}
		if (data?.[0]?.id !== "empty") {
			Array.from(data).map((item) => {
				if (item.id === spanId) {
					setSelectedSpan(item);
					setClickedSpanTags(item);
					return item;
				} else if (!isEmpty(item.children)) {
					getSpanInfo(item.children, spanId);
				}
			});
		}
	};

	const chart = flamegraph()
		.cellHeight(18)
		.transitionDuration(500)
		.inverted(true)
		.tooltip(tip)
		.minFrameSize(10)
		.minFrameSize(4)
		.elided(false)
		.differential(false)
		.sort(true)
		.sort((item) => item.startTime)
		// Use selfValue=true when we're not using the aggregated option, which is not our case.
		// In that case it does a step-function-like computation.
		// Source: flamegraph.js lines 557 and 573.
		// .selfValue(true)
		.onClick(onClick);
		.onClick(onClick)
		.width(800);

	const handleResetZoom = (value) => {
		setResetZoom(value);
	};

	return (
		<Row gutter={{ xs: 8, sm: 16, md: 24, lg: 32 }}>
			{/*<Col md={8} sm={24}>*/}
			{/* <TraceGraphColumn />*/}
			{/*</Col>*/}
			<Col md={24} sm={24}>
			{/* <Card style={{ width: 640 }}> */}
			<Col md={18} sm={18}>
				<Space direction="vertical" size="middle" style={{ width: "100%" }}>
					<Card bodyStyle={{ padding: 80 }} style={{ height: 320 }}>
					<Card bodyStyle={{ padding: 24 }} style={{ height: 320 }}>
						<div
							style={{
								display: "flex",
@@ -89,20 +164,27 @@ const _TraceGraph = (props: TraceGraphProps) => {
							<div style={{ textAlign: "center" }}>
								Trace Graph component ID is {params.id}{" "}
							</div>
							<Button
								type="primary"
								onClick={setResetZoom.bind(this, true)}
								style={{ width: 160 }}
							>
								Reset Zoom
							</Button>
							<div id="chart" style={{ fontSize: 12, marginTop: 20 }}></div>
						</div>
					</Card>

					<SelectedSpanDetails clickedSpanTags={clickedSpanTags} />
					<Affix offsetTop={24}>
						<TraceGanttChartContainer id={"collapsable"}>
							<TraceGanttChart
								treeData={sortedTreeData}
								clickedSpan={clickedSpan}
								selectedSpan={selectedSpan}
								resetZoom={handleResetZoom}
								setSpanTagsInfo={setSpanTagsInfo}
							/>
						</TraceGanttChartContainer>
					</Affix>
				</Space>
			</Col>
			<Col md={6} sm={6}>
				<Affix offsetTop={24}>
					<SelectedSpanDetails data={clickedSpanTags} />
				</Affix>
			</Col>
		</Row>
	);
};

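The render flow above boils down to configuring a d3-flame-graph instance and binding the span tree to a container. A minimal sketch using only calls that appear in this file (the `#chart` div and the tree produced by `spanToTreeUtil` come from the component; everything else is trimmed away):

const chart = flamegraph()
	.cellHeight(18)  // pixel height of each frame row
	.inverted(true)  // root span on top, children below
	.minFrameSize(4) // drop frames narrower than 4 px
	.width(800);

d3.select("#chart").datum(tree).call(chart); // (re)draws the flame graph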
@@ -1,6 +1,6 @@
import React, { useEffect } from "react";
import { connect } from "react-redux";
import { NavLink } from "react-router-dom";
import { useHistory } from "react-router-dom";
import { Space, Table } from "antd";
import ROUTES from "Src/constants/routes";

@@ -10,9 +10,14 @@ import { isOnboardingSkipped } from "../../utils/app";
import moment from "moment";
import styled from "styled-components";

const StyledTable = styled(Table)`
	cursor: pointer;
`;

const TraceHeader = styled.div`
	margin: 16px 0;
`;

interface TraceListProps {
	traces: traceResponseNew;
	fetchTraces: Function;
@@ -25,26 +30,17 @@ interface TableDataSourceItem {
	operationName: string;
	startTime: number;
	duration: number;
	service: string;
}

const _TraceList = (props: TraceListProps) => {
	// PNOTE (TO DO) - Currently this use of useEffect gives a warning. May need to memoise fetchTraces - https://stackoverflow.com/questions/55840294/how-to-fix-missing-dependency-warning-when-using-useeffect-react-hook
	let history = useHistory();

	useEffect(() => {
		props.fetchTraces();
	}, []);

	// PNOTE - code snippet -
	// renderList(): JSX.Element[] {
	//   return this.props.todos.map((todo: Todo) => {
	//     return (
	//       <div onClick={() => this.onTodoClick(todo.id)} key={todo.id}>
	//         {todo.title}
	//       </div>
	//     );
	//   });
	// }

	const columns: any = [
		{
			title: "Start Time",
@@ -57,12 +53,9 @@ const _TraceList = (props: TraceListProps) => {
			// new Date() assumes input in milliseconds. Start Time stamp returned by druid api for span list is in ms
		},
		{
			title: "Duration (in ms)",
			dataIndex: "duration",
			key: "duration",
			sorter: (a: any, b: any) => a.duration - b.duration,
			sortDirections: ["descend", "ascend"],
			render: (value: number) => (value / 1000000).toFixed(2),
			title: "Service",
			dataIndex: "service",
			key: "service",
		},
		{
			title: "Operation",
@@ -70,13 +63,12 @@ const _TraceList = (props: TraceListProps) => {
			key: "operationName",
		},
		{
			title: "TraceID",
			dataIndex: "traceid",
			key: "traceid",
			render: (text: string) => (
				<NavLink to={ROUTES.TRACES + "/" + text}>{text.slice(-16)}</NavLink>
			),
			//only last 16 chars have traceID, druid makes it 32 by adding zeros
			title: "Duration (in ms)",
			dataIndex: "duration",
			key: "duration",
			sorter: (a: any, b: any) => a.duration - b.duration,
			sortDirections: ["descend", "ascend"],
			render: (value: number) => (value / 1000000).toFixed(2),
		},
	];

@@ -87,8 +79,6 @@ const _TraceList = (props: TraceListProps) => {
		typeof props.traces[0] !== "undefined" &&
		props.traces[0].events.length > 0
	) {
		//PNOTE - Template literal should be wrapped in curly braces for it to be evaluated

		props.traces[0].events.map(
			(item: (number | string | string[] | pushDStree[])[], index) => {
				if (
@@ -96,7 +86,8 @@ const _TraceList = (props: TraceListProps) => {
					typeof item[4] === "string" &&
					typeof item[6] === "string" &&
					typeof item[1] === "string" &&
					typeof item[2] === "string"
					typeof item[2] === "string" &&
					typeof item[3] === "string"
				)
					dataSource.push({
						startTime: item[0],
@@ -105,13 +96,30 @@ const _TraceList = (props: TraceListProps) => {
						spanid: item[1],
						traceid: item[2],
						key: index.toString(),
						service: item[3],
					});
			},
		);

		//antd table in typescript - https://codesandbox.io/s/react-typescript-669cv

		return <Table dataSource={dataSource} columns={columns} size="middle" />;
		return (
			<StyledTable
				dataSource={dataSource}
				columns={columns}
				size="middle"
				rowClassName=""
				onRow={(record) => ({
					onClick: () => {
						history.push({
							pathname: ROUTES.TRACES + "/" + record.traceid,
							state: {
								spanId: record.spanid,
							},
						});
					},
				})}
			/>
		);
	} else {
		if (isOnboardingSkipped()) {
			return (
@@ -136,7 +144,7 @@ const _TraceList = (props: TraceListProps) => {

	return (
		<div>
			<TraceHeader>List of traces with spanID</TraceHeader>
			<TraceHeader>List of filtered spans</TraceHeader>
			<div>{renderTraces()}</div>
		</div>
	);

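The row click above and the trace page shown earlier are two halves of one hand-off: the list pushes the clicked span's id through router state, and `_TraceGraph` reads it back. A condensed sketch of the pair, using only names from these two files:

// TraceList: navigate to the trace and carry the span id along.
history.push({
	pathname: ROUTES.TRACES + "/" + record.traceid,
	state: { spanId: record.spanid },
});

// _TraceGraph: recover it from the router location.
const location = useLocation();
const spanId = location?.state?.spanId; // consumed by getSpanInfo to pre-select the span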
@@ -1,18 +1,19 @@
import React, { useEffect, useMemo, useState } from "react";
import React, { useEffect, useState } from "react";
import { Bar } from "react-chartjs-2";
import { Card, Form, Select, Space } from "antd";
import { Card, Select, Space } from "antd";
import { connect } from "react-redux";

import {
	getServicesList,
	getUsageData,
	GlobalTime,
	servicesListItem,
	usageDataItem,
} from "../../store/actions";
import { StoreState } from "../../store/reducers";
import moment from "moment";
import { isOnboardingSkipped } from "../../utils/app";
import { useRoute } from "../RouteProvider";
import { servicesListItem } from "../../store/actions/MetricsActions";
const { Option } = Select;

interface UsageExplorerProps {
@@ -48,7 +49,6 @@ const interval = [
		label: "Hours",
		applicableOn: [timeDaysOptions[2], timeDaysOptions[1]],
	},
	,
];

const _UsageExplorer = (props: UsageExplorerProps) => {
@@ -56,6 +56,8 @@ const _UsageExplorer = (props: UsageExplorerProps) => {
	const [selectedInterval, setSelectedInterval] = useState(interval[2]);
	const [selectedService, setSelectedService] = useState<string>("");

	const { state } = useRoute();

	useEffect(() => {
		if (selectedTime && selectedInterval) {
			const maxTime = new Date().getTime() * 1000000;
@@ -71,7 +73,13 @@ const _UsageExplorer = (props: UsageExplorerProps) => {
	}, [selectedTime, selectedInterval, selectedService]);

	useEffect(() => {
		props.getServicesList(props.globalTime);
		/*
			Call the APIs only when the route is loaded.
			Check this issue: https://github.com/SigNoz/signoz/issues/110
		*/
		if (state.USAGE_EXPLORER.isLoaded) {
			props.getServicesList(props.globalTime);
		}
	}, []);

	const data = {
@@ -203,7 +211,7 @@ const mapStateToProps = (
		totalCount: totalCount,
		usageData: state.usageDate,
		globalTime: state.globalTime,
		servicesList: state.servicesList,
		servicesList: state.metricsData.serviceList,
	};
};

frontend/src/store/actions/MetricsActions/index.ts (new file, 3 lines)
@@ -0,0 +1,3 @@
export * from "./metricsInterfaces";
|
||||
export * from "./metricsActionTypes";
|
||||
export * from "./metricsActions";
|
||||
@@ -0,0 +1,32 @@
import {
	externalErrCodeMetricsActions,
	externalMetricsAvgDurationAction,
	getDbOverViewMetricsAction,
	getExternalMetricsAction,
	getFilteredTraceMetricsAction,
	getServiceMetricsAction,
	getServicesListAction,
	getTopEndpointsAction,
} from "./metricsInterfaces";

export enum MetricsActionTypes {
	updateInput = "UPDATE_INPUT",
	getServicesList = "GET_SERVICE_LIST",
	getServiceMetrics = "GET_SERVICE_METRICS",
	getAvgDurationMetrics = "GET_AVG_DURATION_METRICS",
	getErrCodeMetrics = "GET_ERR_CODE_METRICS",
	getDbOverviewMetrics = "GET_DB_OVERVIEW_METRICS",
	getExternalMetrics = "GET_EXTERNAL_METRICS",
	getTopEndpoints = "GET_TOP_ENDPOINTS",
	getFilteredTraceMetrics = "GET_FILTERED_TRACE_METRICS",
}

export type MetricsActions =
	| getServicesListAction
	| getServiceMetricsAction
	| getTopEndpointsAction
	| getFilteredTraceMetricsAction
	| getExternalMetricsAction
	| externalErrCodeMetricsActions
	| getDbOverViewMetricsAction
	| externalMetricsAvgDurationAction;
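Each member of the `MetricsActions` union pairs one enum tag with one payload type, which is what lets a reducer switch narrow the payload. A minimal sketch of a conforming action value (the empty payload is illustrative):

const action: MetricsActions = {
	type: MetricsActionTypes.getServicesList,
	payload: [], // servicesListItem[]
};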
frontend/src/store/actions/MetricsActions/metricsActions.ts (new file, 190 lines)
@@ -0,0 +1,190 @@
import { Dispatch } from "redux";
|
||||
import api, { apiV1 } from "../../../api";
|
||||
|
||||
import { GlobalTime } from "../global";
|
||||
import { toUTCEpoch } from "../../../utils/timeUtils";
|
||||
import { MetricsActionTypes } from "./metricsActionTypes";
|
||||
import * as MetricsInterfaces from "./metricsInterfaces";
|
||||
|
||||
export const getServicesList = (globalTime: GlobalTime) => {
|
||||
return async (dispatch: Dispatch) => {
|
||||
let request_string =
|
||||
"/services?start=" + globalTime.minTime + "&end=" + globalTime.maxTime;
|
||||
|
||||
const response = await api.get<MetricsInterfaces.servicesListItem[]>(
|
||||
apiV1 + request_string,
|
||||
);
|
||||
|
||||
dispatch<MetricsInterfaces.getServicesListAction>({
|
||||
type: MetricsActionTypes.getServicesList,
|
||||
payload: response.data,
|
||||
//PNOTE - response.data in the axios response has the actual API response
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
export const getDbOverViewMetrics = (
|
||||
serviceName: string,
|
||||
globalTime: GlobalTime,
|
||||
) => {
|
||||
return async (dispatch: Dispatch) => {
|
||||
let request_string =
|
||||
"/service/dbOverview?service=" +
|
||||
serviceName +
|
||||
"&start=" +
|
||||
globalTime.minTime +
|
||||
"&end=" +
|
||||
globalTime.maxTime +
|
||||
"&step=60";
|
||||
const response = await api.get<MetricsInterfaces.dbOverviewMetricsItem[]>(
|
||||
apiV1 + request_string,
|
||||
);
|
||||
dispatch<MetricsInterfaces.getDbOverViewMetricsAction>({
|
||||
type: MetricsActionTypes.getDbOverviewMetrics,
|
||||
payload: response.data,
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
export const getExternalMetrics = (
|
||||
serviceName: string,
|
||||
globalTime: GlobalTime,
|
||||
) => {
|
||||
return async (dispatch: Dispatch) => {
|
||||
let request_string =
|
||||
"/service/external?service=" +
|
||||
serviceName +
|
||||
"&start=" +
|
||||
globalTime.minTime +
|
||||
"&end=" +
|
||||
globalTime.maxTime +
|
||||
"&step=60";
|
||||
const response = await api.get<MetricsInterfaces.externalMetricsItem[]>(
|
||||
apiV1 + request_string,
|
||||
);
|
||||
dispatch<MetricsInterfaces.getExternalMetricsAction>({
|
||||
type: MetricsActionTypes.getExternalMetrics,
|
||||
payload: response.data,
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
export const getExternalAvgDurationMetrics = (
|
||||
serviceName: string,
|
||||
globalTime: GlobalTime,
|
||||
) => {
|
||||
return async (dispatch: Dispatch) => {
|
||||
let request_string =
|
||||
"/service/externalAvgDuration?service=" +
|
||||
serviceName +
|
||||
"&start=" +
|
||||
globalTime.minTime +
|
||||
"&end=" +
|
||||
globalTime.maxTime +
|
||||
"&step=60";
|
||||
|
||||
const response = await api.get<
|
||||
MetricsInterfaces.externalMetricsAvgDurationItem[]
|
||||
>(apiV1 + request_string);
|
||||
dispatch<MetricsInterfaces.externalMetricsAvgDurationAction>({
|
||||
type: MetricsActionTypes.getAvgDurationMetrics,
|
||||
payload: response.data,
|
||||
});
|
||||
};
|
||||
};
|
||||
export const getExternalErrCodeMetrics = (
|
||||
serviceName: string,
|
||||
globalTime: GlobalTime,
|
||||
) => {
|
||||
return async (dispatch: Dispatch) => {
|
||||
let request_string =
|
||||
"/service/externalErrors?service=" +
|
||||
serviceName +
|
||||
"&start=" +
|
||||
globalTime.minTime +
|
||||
"&end=" +
|
||||
globalTime.maxTime +
|
||||
"&step=60";
|
||||
const response = await api.get<
|
||||
MetricsInterfaces.externalErrCodeMetricsItem[]
|
||||
>(apiV1 + request_string);
|
||||
|
||||
dispatch<MetricsInterfaces.externalErrCodeMetricsActions>({
|
||||
type: MetricsActionTypes.getErrCodeMetrics,
|
||||
payload: response.data,
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
export const getServicesMetrics = (
|
||||
serviceName: string,
|
||||
globalTime: GlobalTime,
|
||||
) => {
|
||||
return async (dispatch: Dispatch) => {
|
||||
let request_string =
|
||||
"/service/overview?service=" +
|
||||
serviceName +
|
||||
"&start=" +
|
||||
globalTime.minTime +
|
||||
"&end=" +
|
||||
globalTime.maxTime +
|
||||
"&step=60";
|
||||
const response = await api.get<MetricsInterfaces.metricItem[]>(
|
||||
apiV1 + request_string,
|
||||
);
|
||||
|
||||
dispatch<MetricsInterfaces.getServiceMetricsAction>({
|
||||
type: MetricsActionTypes.getServiceMetrics,
|
||||
payload: response.data,
|
||||
//PNOTE - response.data in the axios response has the actual API response
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
export const getTopEndpoints = (
|
||||
serviceName: string,
|
||||
globalTime: GlobalTime,
|
||||
) => {
|
||||
return async (dispatch: Dispatch) => {
|
||||
let request_string =
|
||||
"/service/top_endpoints?service=" +
|
||||
serviceName +
|
||||
"&start=" +
|
||||
globalTime.minTime +
|
||||
"&end=" +
|
||||
globalTime.maxTime;
|
||||
const response = await api.get<MetricsInterfaces.topEndpointListItem[]>(
|
||||
apiV1 + request_string,
|
||||
);
|
||||
|
||||
dispatch<MetricsInterfaces.getTopEndpointsAction>({
|
||||
type: MetricsActionTypes.getTopEndpoints,
|
||||
payload: response.data,
|
||||
//PNOTE - response.data in the axios response has the actual API response
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
export const getFilteredTraceMetrics = (
|
||||
filter_params: string,
|
||||
globalTime: GlobalTime,
|
||||
) => {
|
||||
return async (dispatch: Dispatch) => {
|
||||
let request_string =
|
||||
"/spans/aggregates?start=" +
|
||||
toUTCEpoch(globalTime.minTime) +
|
||||
"&end=" +
|
||||
toUTCEpoch(globalTime.maxTime) +
|
||||
"&" +
|
||||
filter_params;
|
||||
const response = await api.get<MetricsInterfaces.customMetricsItem[]>(
|
||||
apiV1 + request_string,
|
||||
);
|
||||
|
||||
dispatch<MetricsInterfaces.getFilteredTraceMetricsAction>({
|
||||
type: MetricsActionTypes.getFilteredTraceMetrics,
|
||||
payload: response.data,
|
||||
//PNOTE - response.data in the axios response has the actual API response
|
||||
});
|
||||
};
|
||||
};
|
||||
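Each creator above returns an async function rather than a plain action object, so it must flow through redux-thunk. A sketch of how a component would trigger one (the component name is hypothetical, and the store is assumed to have the thunk middleware installed):

// Hypothetical wiring: fetch the services list on behalf of a component.
import { connect } from "react-redux";
import { getServicesList } from "../../store/actions/MetricsActions";

// With the object shorthand, props.getServicesList(globalTime) runs the
// thunk, calls the services endpoint, and dispatches GET_SERVICE_LIST.
export default connect(null, { getServicesList })(MyComponent);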
@@ -0,0 +1,98 @@
import { MetricsActionTypes } from "./metricsActionTypes";

export interface servicesListItem {
	serviceName: string;
	p99: number;
	avgDuration: number;
	numCalls: number;
	callRate: number;
	numErrors: number;
	errorRate: number;
}

export interface metricItem {
	timestamp: number;
	p50: number;
	p95: number;
	p99: number;
	numCalls: number;
	callRate: number;
	numErrors: number;
	errorRate: number;
}

export interface externalMetricsAvgDurationItem {
	avgDuration: number;
	timestamp: number;
}

export interface externalErrCodeMetricsItem {
	externalHttpUrl: string;
	numCalls: number;
	timestamp: number;
	callRate: number;
}
export interface topEndpointListItem {
	p50: number;
	p95: number;
	p99: number;
	numCalls: number;
	name: string;
}

export interface externalMetricsItem {
	avgDuration: number;
	callRate: number;
	externalHttpUrl: string;
	numCalls: number;
	timestamp: number;
}

export interface dbOverviewMetricsItem {
	avgDuration: number;
	callRate: number;
	dbSystem: string;
	numCalls: number;
	timestamp: number;
}

export interface customMetricsItem {
	timestamp: number;
	value: number;
}

export interface getServicesListAction {
	type: MetricsActionTypes.getServicesList;
	payload: servicesListItem[];
}

export interface externalErrCodeMetricsActions {
	type: MetricsActionTypes.getErrCodeMetrics;
	payload: externalErrCodeMetricsItem[];
}
export interface externalMetricsAvgDurationAction {
	type: MetricsActionTypes.getAvgDurationMetrics;
	payload: externalMetricsAvgDurationItem[];
}
export interface getServiceMetricsAction {
	type: MetricsActionTypes.getServiceMetrics;
	payload: metricItem[];
}
export interface getExternalMetricsAction {
	type: MetricsActionTypes.getExternalMetrics;
	payload: externalMetricsItem[];
}

export interface getDbOverViewMetricsAction {
	type: MetricsActionTypes.getDbOverviewMetrics;
	payload: dbOverviewMetricsItem[];
}
export interface getTopEndpointsAction {
	type: MetricsActionTypes.getTopEndpoints;
	payload: topEndpointListItem[];
}

export interface getFilteredTraceMetricsAction {
	type: MetricsActionTypes.getFilteredTraceMetrics;
	payload: customMetricsItem[];
}
@@ -2,6 +2,6 @@ export * from "./types";
|
||||
export * from "./traceFilters";
|
||||
export * from "./serviceMap";
|
||||
export * from "./traces";
|
||||
export * from "./metrics";
|
||||
export * from "./MetricsActions";
|
||||
export * from "./usage";
|
||||
export * from "./global";
|
||||
|
||||
@@ -1,277 +0,0 @@
import { Dispatch } from "redux";
import api, { apiV1 } from "../../api";

import { GlobalTime } from "./global";
import { ActionTypes } from "./types";
import { Token } from "../../utils/token";
import { toUTCEpoch } from "../../utils/timeUtils";

export interface servicesListItem {
	serviceName: string;
	p99: number;
	avgDuration: number;
	numCalls: number;
	callRate: number;
	numErrors: number;
	errorRate: number;
}

export interface metricItem {
	timestamp: number;
	p50: number;
	p95: number;
	p99: number;
	numCalls: number;
	callRate: number;
	numErrors: number;
	errorRate: number;
}

export interface externalMetricsAvgDurationItem {
	avgDuration: number;
	timestamp: number;
}

export interface externalErrCodeMetricsItem {
	errorRate: number;
	externalHttpUrl: string;
	numErrors: number;
	timestamp: number;
}
export interface topEndpointListItem {
	p50: number;
	p90: number;
	p99: number;
	numCalls: number;
	name: string;
}

export interface externalMetricsItem {
	avgDuration: number;
	callRate: number;
	externalHttpUrl: string;
	numCalls: number;
	timestamp: number;
}

export interface dbOverviewMetricsItem {
	avgDuration: number;
	callRate: number;
	dbSystem: string;
	numCalls: number;
	timestamp: number;
}

export interface customMetricsItem {
	timestamp: number;
	value: number;
}

export interface getServicesListAction {
	type: ActionTypes.getServicesList;
	payload: servicesListItem[];
}

export interface externalErrCodeMetricsActions {
	type: ActionTypes.getErrCodeMetrics;
	payload: externalErrCodeMetricsItem[];
}
export interface externalMetricsAvgDurationAction {
	type: ActionTypes.getAvgDurationMetrics;
	payload: externalMetricsAvgDurationItem[];
}
export interface getServiceMetricsAction {
	type: ActionTypes.getServiceMetrics;
	payload: metricItem[];
}
export interface getExternalMetricsAction {
	type: ActionTypes.getExternalMetrics;
	payload: externalMetricsItem[];
}

export interface getDbOverViewMetricsAction {
	type: ActionTypes.getDbOverviewMetrics;
	payload: dbOverviewMetricsItem[];
}
export interface getTopEndpointsAction {
	type: ActionTypes.getTopEndpoints;
	payload: topEndpointListItem[];
}

export interface getFilteredTraceMetricsAction {
	type: ActionTypes.getFilteredTraceMetrics;
	payload: customMetricsItem[];
}

export const getServicesList = (globalTime: GlobalTime) => {
	return async (dispatch: Dispatch) => {
		let request_string =
			"/services?start=" + globalTime.minTime + "&end=" + globalTime.maxTime;

		const response = await api.get<servicesListItem[]>(apiV1 + request_string);

		dispatch<getServicesListAction>({
			type: ActionTypes.getServicesList,
			payload: response.data,
			//PNOTE - response.data in the axios response has the actual API response
		});
	};
};

export const getDbOverViewMetrics = (
	serviceName: string,
	globalTime: GlobalTime,
) => {
	return async (dispatch: Dispatch) => {
		let request_string =
			"/service/dbOverview?service=" +
			serviceName +
			"&start=" +
			globalTime.minTime +
			"&end=" +
			globalTime.maxTime +
			"&step=60";
		const response = await api.get<dbOverviewMetricsItem[]>(
			apiV1 + request_string,
		);
		dispatch<getDbOverViewMetricsAction>({
			type: ActionTypes.getDbOverviewMetrics,
			payload: response.data,
		});
	};
};

export const getExternalMetrics = (
	serviceName: string,
	globalTime: GlobalTime,
) => {
	return async (dispatch: Dispatch) => {
		let request_string =
			"/service/external?service=" +
			serviceName +
			"&start=" +
			globalTime.minTime +
			"&end=" +
			globalTime.maxTime +
			"&step=60";
		const response = await api.get<externalMetricsItem[]>(apiV1 + request_string);
		dispatch<getExternalMetricsAction>({
			type: ActionTypes.getExternalMetrics,
			payload: response.data,
		});
	};
};

export const getExternalAvgDurationMetrics = (
	serviceName: string,
	globalTime: GlobalTime,
) => {
	return async (dispatch: Dispatch) => {
		let request_string =
			"/service/externalAvgDuration?service=" +
			serviceName +
			"&start=" +
			globalTime.minTime +
			"&end=" +
			globalTime.maxTime +
			"&step=60";

		const response = await api.get<externalMetricsAvgDurationItem[]>(
			apiV1 + request_string,
		);
		dispatch<externalMetricsAvgDurationAction>({
			type: ActionTypes.getAvgDurationMetrics,
			payload: response.data,
		});
	};
};
export const getExternalErrCodeMetrics = (
	serviceName: string,
	globalTime: GlobalTime,
) => {
	return async (dispatch: Dispatch) => {
		let request_string =
			"/service/externalErrors?service=" +
			serviceName +
			"&start=" +
			globalTime.minTime +
			"&end=" +
			globalTime.maxTime +
			"&step=60";
		const response = await api.get<externalErrCodeMetricsItem[]>(
			apiV1 + request_string,
		);

		dispatch<externalErrCodeMetricsActions>({
			type: ActionTypes.getErrCodeMetrics,
			payload: response.data,
		});
	};
};

export const getServicesMetrics = (
	serviceName: string,
	globalTime: GlobalTime,
) => {
	return async (dispatch: Dispatch) => {
		let request_string =
			"/service/overview?service=" +
			serviceName +
			"&start=" +
			globalTime.minTime +
			"&end=" +
			globalTime.maxTime +
			"&step=60";
		const response = await api.get<metricItem[]>(apiV1 + request_string);

		dispatch<getServiceMetricsAction>({
			type: ActionTypes.getServiceMetrics,
			payload: response.data,
			//PNOTE - response.data in the axios response has the actual API response
		});
	};
};

export const getTopEndpoints = (
	serviceName: string,
	globalTime: GlobalTime,
) => {
	return async (dispatch: Dispatch) => {
		let request_string =
			"/service/top_endpoints?service=" +
			serviceName +
			"&start=" +
			globalTime.minTime +
			"&end=" +
			globalTime.maxTime;
		const response = await api.get<topEndpointListItem[]>(apiV1 + request_string);

		dispatch<getTopEndpointsAction>({
			type: ActionTypes.getTopEndpoints,
			payload: response.data,
			//PNOTE - response.data in the axios response has the actual API response
		});
	};
};

export const getFilteredTraceMetrics = (
	filter_params: string,
	globalTime: GlobalTime,
) => {
	return async (dispatch: Dispatch) => {
		let request_string =
			"/spans/aggregates?start=" +
			toUTCEpoch(globalTime.minTime) +
			"&end=" +
			toUTCEpoch(globalTime.maxTime) +
			"&" +
			filter_params;
		const response = await api.get<customMetricsItem[]>(apiV1 + request_string);

		dispatch<getFilteredTraceMetricsAction>({
			type: ActionTypes.getFilteredTraceMetrics,
			payload: response.data,
			//PNOTE - response.data in the axios response has the actual API response
		});
	};
};
@@ -32,16 +32,4 @@ export const updateTraceFilters = (traceFilters: TraceFilters) => {
	};
};

export interface updateInputTagAction {
	type: ActionTypes.updateInput;
	payload: string;
}

export const updateInputTag = (Input: string) => {
	return {
		type: ActionTypes.updateInput,
		payload: Input,
	};
};

//named export when you want to export multiple functions from the same file

@@ -1,36 +1,18 @@
import { FetchTracesAction, FetchTraceItemAction } from "./traces";
import { updateTraceFiltersAction, updateInputTagAction } from "./traceFilters";
import {
	getServicesListAction,
	getServiceMetricsAction,
	externalErrCodeMetricsActions,
	externalMetricsAvgDurationAction,
	getExternalMetricsAction,
	getTopEndpointsAction,
	getFilteredTraceMetricsAction,
	getDbOverViewMetricsAction,
} from "./metrics";

import { serviceMapItemAction, servicesAction } from "./serviceMap";
import { getUsageDataAction } from "./usage";
import { updateTimeIntervalAction } from "./global";

export enum ActionTypes {
	updateTraceFilters = "UPDATE_TRACES_FILTER",
	updateInput = "UPDATE_INPUT",
	fetchTraces = "FETCH_TRACES",
	fetchTraceItem = "FETCH_TRACE_ITEM",
	getServicesList = "GET_SERVICE_LIST",
	getServiceMetrics = "GET_SERVICE_METRICS",
	getAvgDurationMetrics = "GET_AVG_DURATION_METRICS",
	getErrCodeMetrics = "GET_ERR_CODE_METRICS",
	getDbOverviewMetrics = "GET_DB_OVERVIEW_METRICS",
	getExternalMetrics = "GET_EXTERNAL_METRICS",
	getTopEndpoints = "GET_TOP_ENDPOINTS",
	getUsageData = "GET_USAGE_DATE",
	updateTimeInterval = "UPDATE_TIME_INTERVAL",
	getFilteredTraceMetrics = "GET_FILTERED_TRACE_METRICS",
	getServiceMapItems = "GET_SERVICE_MAP_ITEMS",
	getServices = "GET_SERVICES",
	getUsageData = "GET_USAGE_DATE",
	fetchTraces = "FETCH_TRACES",
	fetchTraceItem = "FETCH_TRACE_ITEM",
}

export type Action =
@@ -38,15 +20,7 @@ export type Action =
	| FetchTracesAction
	| updateTraceFiltersAction
	| updateInputTagAction
	| getServicesListAction
	| getServiceMetricsAction
	| getTopEndpointsAction
	| getUsageDataAction
	| updateTimeIntervalAction
	| getFilteredTraceMetricsAction
	| getExternalMetricsAction
	| externalErrCodeMetricsActions
	| getDbOverViewMetricsAction
	| servicesAction
	| serviceMapItemAction
	| externalMetricsAvgDurationAction;
	| serviceMapItemAction;

@@ -2,7 +2,6 @@ import { Dispatch } from "redux";
import api, { apiV1 } from "../../api";

import { ActionTypes } from "./types";
import { GlobalTime } from "./global";
import { toUTCEpoch } from "../../utils/timeUtils";

export interface usageDataItem {

@@ -2,66 +2,35 @@ import { combineReducers } from "redux";
import {
	traceResponseNew,
	spansWSameTraceIDResponse,
	servicesListItem,
	metricItem,
	topEndpointListItem,
	externalMetricsItem,
	externalMetricsAvgDurationItem,
	usageDataItem,
	GlobalTime,
	externalErrCodeMetricsItem,
	serviceMapStore,
	customMetricsItem,
	TraceFilters,
} from "../actions";
import { updateGlobalTimeReducer } from "./global";
import {
	filteredTraceMetricsReducer,
	serviceMetricsReducer,
	externalErrCodeMetricsReducer,
	serviceTableReducer,
	topEndpointsReducer,
	dbOverviewMetricsReducer,
	externalMetricsReducer,
	externalAvgDurationMetricsReducer,
} from "./metrics";
import { traceFiltersReducer, inputsReducer } from "./traceFilters";
import { MetricsInitialState, metricsReducer } from "./metrics";
import TraceFilterReducer from "./traceFilters";
import { traceItemReducer, tracesReducer } from "./traces";
import { usageDataReducer } from "./usage";
import { ServiceMapReducer } from "./serviceMap";

export interface StoreState {
	metricsData: MetricsInitialState;
	traceFilters: TraceFilters;
	inputTag: string;
	traces: traceResponseNew;
	traceItem: spansWSameTraceIDResponse;
	servicesList: servicesListItem[];
	serviceMetrics: metricItem[];
	topEndpointsList: topEndpointListItem[];
	externalMetrics: externalMetricsItem[];
	dbOverviewMetrics: externalMetricsItem[];
	externalAvgDurationMetrics: externalMetricsAvgDurationItem[];
	externalErrCodeMetrics: externalErrCodeMetricsItem[];
	usageDate: usageDataItem[];
	globalTime: GlobalTime;
	filteredTraceMetrics: customMetricsItem[];
	serviceMap: serviceMapStore;
}

const reducers = combineReducers<StoreState>({
	traceFilters: traceFiltersReducer,
	inputTag: inputsReducer,
	traceFilters: TraceFilterReducer,
	traces: tracesReducer,
	traceItem: traceItemReducer,
	servicesList: serviceTableReducer,
	serviceMetrics: serviceMetricsReducer,
	dbOverviewMetrics: dbOverviewMetricsReducer,
	topEndpointsList: topEndpointsReducer,
	externalAvgDurationMetrics: externalAvgDurationMetricsReducer,
	externalMetrics: externalMetricsReducer,
	externalErrCodeMetrics: externalErrCodeMetricsReducer,
	usageDate: usageDataReducer,
	globalTime: updateGlobalTimeReducer,
	filteredTraceMetrics: filteredTraceMetricsReducer,
	metricsData: metricsReducer,
	serviceMap: ServiceMapReducer,
});

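With the metrics slices folded into a single `metricsData` key, component selectors change shape accordingly; the UsageExplorer diff above already shows the pattern:

// Before: servicesList: state.servicesList
// After:
const mapStateToProps = (state: StoreState) => ({
	servicesList: state.metricsData.serviceList,
});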
@@ -1,6 +1,4 @@
import {
	ActionTypes,
	Action,
	servicesListItem,
	metricItem,
	topEndpointListItem,
@@ -9,10 +7,21 @@ import {
	externalMetricsItem,
	dbOverviewMetricsItem,
	externalMetricsAvgDurationItem,
} from "../actions";
} from "../actions/MetricsActions";
import { MetricsActionTypes as ActionTypes } from "../actions/MetricsActions/metricsActionTypes";

export const serviceTableReducer = (
	state: servicesListItem[] = [
export type MetricsInitialState = {
	serviceList?: servicesListItem[];
	metricItems?: metricItem[];
	topEndpointListItem?: topEndpointListItem[];
	externalMetricsAvgDurationItem?: externalMetricsAvgDurationItem[];
	externalErrCodeMetricsItem?: externalErrCodeMetricsItem[];
	externalMetricsItem?: externalMetricsItem[];
	dbOverviewMetricsItem?: dbOverviewMetricsItem[];
	customMetricsItem?: customMetricsItem[];
};
export const metricsInitialState: MetricsInitialState = {
	serviceList: [
		{
			serviceName: "",
			p99: 0,
@@ -23,22 +32,11 @@ export const serviceTableReducer = (
			errorRate: 0,
		},
	],
	action: Action,
) => {
	switch (action.type) {
		case ActionTypes.getServicesList:
			return action.payload;
		default:
			return state;
	}
};

export const serviceMetricsReducer = (
	state: metricItem[] = [
	metricItems: [
		{
			timestamp: 0,
			p50: 0,
			p90: 0,
			p95: 0,
			p99: 0,
			numCalls: 0,
			callRate: 0.0,
@@ -46,49 +44,22 @@ export const serviceMetricsReducer = (
			errorRate: 0,
		},
	],
	action: Action,
) => {
	switch (action.type) {
		case ActionTypes.getServiceMetrics:
			return action.payload;
		default:
			return state;
	}
};

export const topEndpointsReducer = (
	state: topEndpointListItem[] = [
		{ p50: 0, p90: 0, p99: 0, numCalls: 0, name: "" },
	topEndpointListItem: [
		{
			p50: 0,
			p95: 0,
			p99: 0,
			numCalls: 0,
			name: "",
		},
	],
	action: Action,
) => {
	switch (action.type) {
		case ActionTypes.getTopEndpoints:
			return action.payload;
		default:
			return state;
	}
};

export const externalAvgDurationMetricsReducer = (
	state: externalMetricsAvgDurationItem[] = [
	externalMetricsAvgDurationItem: [
		{
			avgDuration: 0,
			timestamp: 0,
		},
	],
	action: Action,
) => {
	switch (action.type) {
		case ActionTypes.getAvgDurationMetrics:
			return action.payload;
		default:
			return state;
	}
};

export const externalErrCodeMetricsReducer = (
	state: externalErrCodeMetricsItem[] = [
	externalErrCodeMetricsItem: [
		{
			callRate: 0,
			externalHttpUrl: "",
@@ -96,18 +67,7 @@ export const externalErrCodeMetricsReducer = (
			timestamp: 0,
		},
	],
	action: Action,
) => {
	switch (action.type) {
		case ActionTypes.getErrCodeMetrics:
			return action.payload;
		default:
			return state;
	}
};

export const externalMetricsReducer = (
	state: externalMetricsItem[] = [
	externalMetricsItem: [
		{
			avgDuration: 0,
			callRate: 0,
@@ -116,18 +76,7 @@ export const externalMetricsReducer = (
			timestamp: 0,
		},
	],
	action: Action,
) => {
	switch (action.type) {
		case ActionTypes.getExternalMetrics:
			return action.payload;
		default:
			return state;
	}
};

export const dbOverviewMetricsReducer = (
	state: dbOverviewMetricsItem[] = [
	dbOverviewMetricsItem: [
		{
			avgDuration: 0,
			callRate: 0,
@@ -136,24 +85,68 @@ export const dbOverviewMetricsReducer = (
			timestamp: 0,
		},
	],
	action: Action,
) => {
	switch (action.type) {
		case ActionTypes.getDbOverviewMetrics:
			return action.payload;
		default:
			return state;
	}
	customMetricsItem: [
		{
			timestamp: 0,
			value: 0,
		},
	],
};

export const filteredTraceMetricsReducer = (
	state: customMetricsItem[] = [{ timestamp: 0, value: 0 }],
	action: Action,
type ActionType = {
	type: string;
	payload: any;
};

export const metricsReducer = (
	state: MetricsInitialState = metricsInitialState,
	action: ActionType,
) => {
	switch (action.type) {
		case ActionTypes.getFilteredTraceMetrics:
			return action.payload;
			return {
				...state,
				customMetricsItem: action.payload,
			};
		case ActionTypes.getServiceMetrics:
			return {
				...state,
				metricItems: action.payload,
			};
		case ActionTypes.getDbOverviewMetrics:
			return {
				...state,
				dbOverviewMetricsItem: action.payload,
			};
		case ActionTypes.getExternalMetrics:
			return {
				...state,
				externalMetricsItem: action.payload,
			};
		case ActionTypes.getTopEndpoints:
			return {
				...state,
				topEndpointListItem: action.payload,
			};
		case ActionTypes.getErrCodeMetrics:
			return {
				...state,
				externalErrCodeMetricsItem: action.payload,
			};
		case ActionTypes.getAvgDurationMetrics:
			return {
				...state,
				externalMetricsAvgDurationItem: action.payload,
			};

		case ActionTypes.getServicesList:
			return {
				...state,
				serviceList: action.payload,
			};
		default:
			return state;
			return {
				...state,
			};
	}
};

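The consolidated `metricsReducer` spreads the previous state and overwrites only the slice named by the action, so one dispatch leaves the other metric arrays untouched. A quick sketch with a made-up payload:

const next = metricsReducer(metricsInitialState, {
	type: "GET_SERVICE_LIST", // MetricsActionTypes.getServicesList
	payload: [{ serviceName: "frontend", p99: 120, avgDuration: 40,
		numCalls: 10, callRate: 0.5, numErrors: 0, errorRate: 0 }],
});
// next.serviceList is the payload; next.metricItems etc. are unchanged.

One caveat worth noting: the default branch returns a fresh `{ ...state }` object, which breaks reference equality for unrelated dispatches; returning `state` unchanged is the usual convention.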
@@ -1,12 +1,11 @@
import { ActionTypes, Action, serviceMapStore } from "../actions";

export const ServiceMapReducer = (
	state: serviceMapStore = {
		items: [],
		services: [],
	},
	action: Action,
) => {
const initialState: serviceMapStore = {
	items: [],
	services: [],
};

export const ServiceMapReducer = (state = initialState, action: Action) => {
	switch (action.type) {
		case ActionTypes.getServiceMapItems:
			return {

@@ -1,19 +1,17 @@
import {
	ActionTypes,
	TraceFilters,
	updateInputTagAction,
	updateTraceFiltersAction,
} from "../actions";
import { ActionTypes, TraceFilters } from "../actions";

export const traceFiltersReducer = (
	state: TraceFilters = {
		service: "",
		tags: [],
		operation: "",
		latency: { min: "", max: "" },
	},
	action: updateTraceFiltersAction,
) => {
type ACTION = {
	type: ActionTypes;
	payload: TraceFilters;
};
const initialState: TraceFilters = {
	service: "",
	tags: [],
	operation: "",
	latency: { min: "", max: "" },
};

const TraceFilterReducer = (state = initialState, action: ACTION) => {
	switch (action.type) {
		case ActionTypes.updateTraceFilters:
			return action.payload;
@@ -22,14 +20,4 @@ export const traceFiltersReducer = (
	}
};

export const inputsReducer = (
	state: string = "",
	action: updateInputTagAction,
) => {
	switch (action.type) {
		case ActionTypes.updateInput:
			return action.payload;
		default:
			return state;
	}
};
export default TraceFilterReducer;

@@ -1,18 +0,0 @@
// dark-theme.less

@import "~antd/lib/style/color/colorPalette.less";
@import "~antd/dist/antd.less";
@import "~antd/lib/style/themes/dark.less";

// @primary-color: #00adb5;
// @border-radius-base: 4px;

// @component-background: #303030;
// @body-background: #303030;
// @popover-background: #303030;
// @border-color-base: #6f6c6c;
// @border-color-split: #424242;
// @table-header-sort-active-bg: #424242;
// @card-skeleton-bg: #424242;
// @skeleton-color: #424242;
// @table-header-sort-active-bg: #424242;
@@ -1,9 +0,0 @@
|
||||
/* light-theme.less */
|
||||
|
||||
@import "~antd/lib/style/color/colorPalette.less";
|
||||
@import "~antd/dist/antd.less";
|
||||
@import "~antd/lib/style/themes/default.less";
|
||||
|
||||
/* These are shared variables that can be extracted to their own file */
|
||||
@primary-color: #00adb5;
|
||||
@border-radius-base: 4px;
|
||||
@@ -2,6 +2,7 @@
const { resolve } = require("path");
const HtmlWebpackPlugin = require("html-webpack-plugin");
console.log(resolve(__dirname, "./src/"));

module.exports = {
	mode: "development",
	devtool: "source-map",
@@ -53,7 +54,9 @@ module.exports = {
		},
	],
	},
	plugins: [new HtmlWebpackPlugin({ template: "src/index.html.ejs" })],
	plugins: [
		new HtmlWebpackPlugin({ template: "src/index.html.ejs" }),
	],
	performance: {
		hints: false,
	},

@@ -2,6 +2,7 @@
const { resolve } = require("path");
const HtmlWebpackPlugin = require("html-webpack-plugin");
const CopyPlugin = require("copy-webpack-plugin");
const CompressionPlugin = require("compression-webpack-plugin");

module.exports = {
	mode: "production",
@@ -44,6 +45,9 @@ module.exports = {
		],
	},
	plugins: [
		new CompressionPlugin({
			exclude: /.map$/
		}),
		new HtmlWebpackPlugin({ template: "src/index.html.ejs" }),
		new CopyPlugin({
			patterns: [{ from: resolve(__dirname, "public/"), to: "." }],

1560 frontend/yarn.lock
File diff suppressed because it is too large
10 node_modules/.yarn-integrity generated vendored Normal file
@@ -0,0 +1,10 @@
{
	"systemParams": "darwin-x64-83",
	"modulesFolders": [],
	"flags": [],
	"linkedModules": [],
	"topLevelPatterns": [],
	"lockfileEntries": {},
	"files": [],
	"artifacts": {}
}
Binary file not shown.
124 pkg/query-service/app/clickhouseReader/options.go Normal file
@@ -0,0 +1,124 @@
package clickhouseReader

import (
	"time"

	"github.com/jmoiron/sqlx"
)

type Encoding string

const (
	// EncodingJSON is used for spans encoded as JSON.
	EncodingJSON Encoding = "json"
	// EncodingProto is used for spans encoded as Protobuf.
	EncodingProto Encoding = "protobuf"
)

const (
	defaultDatasource        string        = "tcp://localhost:9000"
	defaultOperationsTable   string        = "signoz_operations"
	defaultIndexTable        string        = "signoz_index"
	defaultSpansTable        string        = "signoz_spans"
	defaultArchiveSpansTable string        = "signoz_archive_spans"
	defaultWriteBatchDelay   time.Duration = 5 * time.Second
	defaultWriteBatchSize    int           = 10000
	defaultEncoding          Encoding      = EncodingJSON
)

const (
	suffixEnabled         = ".enabled"
	suffixDatasource      = ".datasource"
	suffixOperationsTable = ".operations-table"
	suffixIndexTable      = ".index-table"
	suffixSpansTable      = ".spans-table"
	suffixWriteBatchDelay = ".write-batch-delay"
	suffixWriteBatchSize  = ".write-batch-size"
	suffixEncoding        = ".encoding"
)

// namespaceConfig is ClickHouse's internal configuration data
type namespaceConfig struct {
	namespace       string
	Enabled         bool
	Datasource      string
	OperationsTable string
	IndexTable      string
	SpansTable      string
	WriteBatchDelay time.Duration
	WriteBatchSize  int
	Encoding        Encoding
	Connector       Connector
}

// Connector defines how to connect to the database
type Connector func(cfg *namespaceConfig) (*sqlx.DB, error)

func defaultConnector(cfg *namespaceConfig) (*sqlx.DB, error) {
	db, err := sqlx.Open("clickhouse", cfg.Datasource)
	if err != nil {
		return nil, err
	}

	if err := db.Ping(); err != nil {
		return nil, err
	}

	return db, nil
}

// Options store storage plugin related configs
type Options struct {
	primary *namespaceConfig

	others map[string]*namespaceConfig
}

// NewOptions creates a new Options struct.
func NewOptions(datasource string, primaryNamespace string, otherNamespaces ...string) *Options {

	if datasource == "" {
		datasource = defaultDatasource
	}

	options := &Options{
		primary: &namespaceConfig{
			namespace:       primaryNamespace,
			Enabled:         true,
			Datasource:      datasource,
			OperationsTable: defaultOperationsTable,
			IndexTable:      defaultIndexTable,
			SpansTable:      defaultSpansTable,
			WriteBatchDelay: defaultWriteBatchDelay,
			WriteBatchSize:  defaultWriteBatchSize,
			Encoding:        defaultEncoding,
			Connector:       defaultConnector,
		},
		others: make(map[string]*namespaceConfig, len(otherNamespaces)),
	}

	for _, namespace := range otherNamespaces {
		if namespace == archiveNamespace {
			options.others[namespace] = &namespaceConfig{
				namespace:       namespace,
				Datasource:      datasource,
				OperationsTable: "",
				IndexTable:      "",
				SpansTable:      defaultArchiveSpansTable,
				WriteBatchDelay: defaultWriteBatchDelay,
				WriteBatchSize:  defaultWriteBatchSize,
				Encoding:        defaultEncoding,
				Connector:       defaultConnector,
			}
		} else {
			options.others[namespace] = &namespaceConfig{namespace: namespace}
		}
	}

	return options
}

// getPrimary returns the primary namespace configuration
func (opt *Options) getPrimary() *namespaceConfig {
	return opt.primary
}
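
For orientation, here is a minimal usage sketch for the options above. It is not part of the commit; it assumes it lives inside the clickhouseReader package (since getPrimary is unexported) and that a ClickHouse server is reachable at the default localhost datasource.

package clickhouseReader

import "log"

// exampleOptionsUsage is an illustrative sketch, not part of the commit: it
// builds Options for the primary and archive namespaces and opens the
// primary connection through the configured Connector.
func exampleOptionsUsage() {
	opts := NewOptions("tcp://localhost:9000", primaryNamespace, archiveNamespace)

	primary := opts.getPrimary()
	log.Println(primary.Datasource, primary.IndexTable, primary.SpansTable)

	// Connector is defaultConnector here: sqlx.Open followed by Ping.
	db, err := primary.Connector(primary)
	if err != nil {
		log.Println("connect failed:", err)
		return
	}
	defer db.Close()
}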
714 pkg/query-service/app/clickhouseReader/reader.go Normal file
@@ -0,0 +1,714 @@
package clickhouseReader

import (
	"context"
	"errors"
	"fmt"
	"os"
	"strconv"
	"time"

	_ "github.com/ClickHouse/clickhouse-go"
	"github.com/jmoiron/sqlx"

	"go.signoz.io/query-service/model"
	"go.uber.org/zap"
)

const (
	primaryNamespace = "clickhouse"
	archiveNamespace = "clickhouse-archive"

	minTimespanForProgressiveSearch       = time.Hour
	minTimespanForProgressiveSearchMargin = time.Minute
	maxProgressiveSteps                   = 4
)

var (
	ErrNoOperationsTable = errors.New("no operations table supplied")
	ErrNoIndexTable      = errors.New("no index table supplied")
	ErrStartTimeRequired = errors.New("start time is required for search queries")
)

// ClickHouseReader reads spans from ClickHouse
type ClickHouseReader struct {
	db              *sqlx.DB
	operationsTable string
	indexTable      string
	spansTable      string
}

// NewReader returns a ClickHouseReader for the database
func NewReader() *ClickHouseReader {

	datasource := os.Getenv("ClickHouseUrl")
	options := NewOptions(datasource, primaryNamespace, archiveNamespace)
	db, err := initialize(options)

	if err != nil {
		zap.S().Error(err)
	}
	return &ClickHouseReader{
		db:              db,
		operationsTable: options.primary.OperationsTable,
		indexTable:      options.primary.IndexTable,
		spansTable:      options.primary.SpansTable,
	}
}

func initialize(options *Options) (*sqlx.DB, error) {

	db, err := connect(options.getPrimary())
	if err != nil {
		return nil, fmt.Errorf("error connecting to primary db: %v", err)
	}

	return db, nil
}

func connect(cfg *namespaceConfig) (*sqlx.DB, error) {
	if cfg.Encoding != EncodingJSON && cfg.Encoding != EncodingProto {
		return nil, fmt.Errorf("unknown encoding %q, supported: %q, %q", cfg.Encoding, EncodingJSON, EncodingProto)
	}

	return cfg.Connector(cfg)
}

func (r *ClickHouseReader) GetServices(ctx context.Context, queryParams *model.GetServicesParams) (*[]model.ServiceItem, error) {

	if r.indexTable == "" {
		return nil, ErrNoIndexTable
	}

	serviceItems := []model.ServiceItem{}

	query := fmt.Sprintf("SELECT serviceName, quantile(0.99)(durationNano) as p99, avg(durationNano) as avgDuration, count(*) as numCalls FROM %s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' GROUP BY serviceName ORDER BY p99 DESC", r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))

	err := r.db.Select(&serviceItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	////////////////// Below block gets 5xx counts of services
	serviceErrorItems := []model.ServiceItem{}

	query = fmt.Sprintf("SELECT serviceName, count(*) as numErrors FROM %s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND statusCode>=500 GROUP BY serviceName", r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))

	err = r.db.Select(&serviceErrorItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	m5xx := make(map[string]int)

	for j := range serviceErrorItems {
		m5xx[serviceErrorItems[j].ServiceName] = serviceErrorItems[j].NumErrors
	}
	///////////////////////////////////////////

	////////////////// Below block gets 4xx counts of services

	service4xxItems := []model.ServiceItem{}

	query = fmt.Sprintf("SELECT serviceName, count(*) as num4xx FROM %s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND statusCode>=400 AND statusCode<500 GROUP BY serviceName", r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))

	err = r.db.Select(&service4xxItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	m4xx := make(map[string]int)

	for j := range service4xxItems {
		m4xx[service4xxItems[j].ServiceName] = service4xxItems[j].Num4XX
	}

	for i := range serviceItems {
		if val, ok := m5xx[serviceItems[i].ServiceName]; ok {
			serviceItems[i].NumErrors = val
		}
		if val, ok := m4xx[serviceItems[i].ServiceName]; ok {
			serviceItems[i].Num4XX = val
		}
		serviceItems[i].CallRate = float32(serviceItems[i].NumCalls) / float32(queryParams.Period)
		serviceItems[i].FourXXRate = float32(serviceItems[i].Num4XX) / float32(queryParams.Period)
		serviceItems[i].ErrorRate = float32(serviceItems[i].NumErrors) / float32(queryParams.Period)
	}

	return &serviceItems, nil
}

func (r *ClickHouseReader) GetServiceOverview(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, error) {

	serviceOverviewItems := []model.ServiceOverviewItem{}

	query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, quantile(0.99)(durationNano) as p99, quantile(0.95)(durationNano) as p95,quantile(0.50)(durationNano) as p50, count(*) as numCalls FROM %s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND serviceName='%s' GROUP BY time ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10), queryParams.ServiceName)

	err := r.db.Select(&serviceOverviewItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	serviceErrorItems := []model.ServiceErrorItem{}

	query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, count(*) as numErrors FROM %s WHERE timestamp>='%s' AND timestamp<='%s' AND kind='2' AND serviceName='%s' AND statusCode>=500 GROUP BY time ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10), queryParams.ServiceName)

	err = r.db.Select(&serviceErrorItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	m := make(map[int64]int)

	for j := range serviceErrorItems {
		timeObj, _ := time.Parse(time.RFC3339Nano, serviceErrorItems[j].Time)
		m[int64(timeObj.UnixNano())] = serviceErrorItems[j].NumErrors
	}

	for i := range serviceOverviewItems {
		timeObj, _ := time.Parse(time.RFC3339Nano, serviceOverviewItems[i].Time)
		serviceOverviewItems[i].Timestamp = int64(timeObj.UnixNano())
		serviceOverviewItems[i].Time = ""

		if val, ok := m[serviceOverviewItems[i].Timestamp]; ok {
			serviceOverviewItems[i].NumErrors = val
		}
		serviceOverviewItems[i].ErrorRate = float32(serviceOverviewItems[i].NumErrors) * 100 / float32(serviceOverviewItems[i].NumCalls)
		serviceOverviewItems[i].CallRate = float32(serviceOverviewItems[i].NumCalls) / float32(queryParams.StepSeconds)
	}

	return &serviceOverviewItems, nil

}
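
To make the rate arithmetic in GetServiceOverview concrete, a tiny self-contained sketch with made-up numbers (one 60-second bucket, 240 calls, 6 responses with statusCode >= 500); the figures are illustrative only:

package main

import "fmt"

func main() {
	// Hypothetical bucket from GetServiceOverview: a 60-second step with
	// 240 calls, 6 of which returned statusCode >= 500.
	numCalls, numErrors, stepSeconds := 240, 6, 60

	errorRate := float32(numErrors) * 100 / float32(numCalls) // percent of calls
	callRate := float32(numCalls) / float32(stepSeconds)      // calls per second

	fmt.Printf("errorRate=%.1f%% callRate=%.1f/s\n", errorRate, callRate)
	// Output: errorRate=2.5% callRate=4.0/s
}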
func (r *ClickHouseReader) SearchSpans(ctx context.Context, queryParams *model.SpanSearchParams) (*[]model.SearchSpansResult, error) {

	query := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, kind, durationNano, tagsKeys, tagsValues FROM %s WHERE timestamp >= ? AND timestamp <= ?", r.indexTable)

	args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)}

	if len(queryParams.ServiceName) != 0 {
		query = query + " AND serviceName = ?"
		args = append(args, queryParams.ServiceName)
	}

	if len(queryParams.OperationName) != 0 {
		query = query + " AND name = ?"
		args = append(args, queryParams.OperationName)
	}

	if len(queryParams.Kind) != 0 {
		query = query + " AND kind = ?"
		args = append(args, queryParams.Kind)
	}

	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= ?"
		args = append(args, queryParams.MinDuration)
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= ?"
		args = append(args, queryParams.MaxDuration)
	}

	for _, item := range queryParams.Tags {

		if item.Key == "error" && item.Value == "true" {
			query = query + " AND ( has(tags, 'error:true') OR statusCode>=500)"
			continue
		}

		if item.Operator == "equals" {
			query = query + " AND has(tags, ?)"
			args = append(args, fmt.Sprintf("%s:%s", item.Key, item.Value))
		} else if item.Operator == "contains" {
			query = query + " AND tagsValues[indexOf(tagsKeys, ?)] ILIKE ?"
			args = append(args, item.Key)
			args = append(args, fmt.Sprintf("%%%s%%", item.Value))
		} else if item.Operator == "isnotnull" {
			query = query + " AND has(tagsKeys, ?)"
			args = append(args, item.Key)
		} else {
			return nil, fmt.Errorf("Tag Operator %s not supported", item.Operator)
		}

	}

	query = query + " ORDER BY timestamp DESC LIMIT 100"

	var searchScanReponses []model.SearchSpanReponseItem

	err := r.db.Select(&searchScanReponses, query, args...)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	searchSpansResult := []model.SearchSpansResult{
		model.SearchSpansResult{
			Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues"},
			Events:  make([][]interface{}, len(searchScanReponses)),
		},
	}

	for i, item := range searchScanReponses {
		spanEvents := item.GetValues()
		searchSpansResult[0].Events[i] = spanEvents
	}

	return &searchSpansResult, nil
}
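
A sketch of how one tag filter from queryParams.Tags extends the span-search query above. The key, value, and timestamps are invented for illustration; the point is that values travel in args as bound parameters rather than being interpolated into the SQL string:

package main

import "fmt"

func main() {
	// Hypothetical trace of the filter expansion in SearchSpans above.
	query := "SELECT ... FROM signoz_index WHERE timestamp >= ? AND timestamp <= ?"
	args := []interface{}{"1600000000000000000", "1600000360000000000"}

	// An "equals" tag filter {Key: "http.status_code", Value: "404"} appends
	// a has(tags, ?) clause and pushes the joined "key:value" pair into args.
	query = query + " AND has(tags, ?)"
	args = append(args, fmt.Sprintf("%s:%s", "http.status_code", "404"))

	fmt.Println(query)
	fmt.Println(args...)
}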
func (r *ClickHouseReader) GetServiceDBOverview(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceDBOverviewItem, error) {

	var serviceDBOverviewItems []model.ServiceDBOverviewItem

	query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, avg(durationNano) as avgDuration, count(1) as numCalls, dbSystem FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' AND kind='3' AND dbName IS NOT NULL GROUP BY time, dbSystem ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))

	err := r.db.Select(&serviceDBOverviewItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	for i := range serviceDBOverviewItems {
		timeObj, _ := time.Parse(time.RFC3339Nano, serviceDBOverviewItems[i].Time)
		serviceDBOverviewItems[i].Timestamp = int64(timeObj.UnixNano())
		serviceDBOverviewItems[i].Time = ""
		serviceDBOverviewItems[i].CallRate = float32(serviceDBOverviewItems[i].NumCalls) / float32(queryParams.StepSeconds)
	}

	if serviceDBOverviewItems == nil {
		serviceDBOverviewItems = []model.ServiceDBOverviewItem{}
	}

	return &serviceDBOverviewItems, nil

}

func (r *ClickHouseReader) GetServiceExternalAvgDuration(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {

	var serviceExternalItems []model.ServiceExternalItem

	query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, avg(durationNano) as avgDuration FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' AND kind='3' AND externalHttpUrl IS NOT NULL GROUP BY time ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))

	err := r.db.Select(&serviceExternalItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	for i := range serviceExternalItems {
		timeObj, _ := time.Parse(time.RFC3339Nano, serviceExternalItems[i].Time)
		serviceExternalItems[i].Timestamp = int64(timeObj.UnixNano())
		serviceExternalItems[i].Time = ""
		serviceExternalItems[i].CallRate = float32(serviceExternalItems[i].NumCalls) / float32(queryParams.StepSeconds)
	}

	if serviceExternalItems == nil {
		serviceExternalItems = []model.ServiceExternalItem{}
	}

	return &serviceExternalItems, nil
}

func (r *ClickHouseReader) GetServiceExternalErrors(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {

	var serviceExternalErrorItems []model.ServiceExternalItem

	query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, avg(durationNano) as avgDuration, count(1) as numCalls, externalHttpUrl FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' AND kind='3' AND externalHttpUrl IS NOT NULL AND statusCode >= 500 GROUP BY time, externalHttpUrl ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))

	err := r.db.Select(&serviceExternalErrorItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}
	var serviceExternalTotalItems []model.ServiceExternalItem

	queryTotal := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, avg(durationNano) as avgDuration, count(1) as numCalls, externalHttpUrl FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' AND kind='3' AND externalHttpUrl IS NOT NULL GROUP BY time, externalHttpUrl ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))

	errTotal := r.db.Select(&serviceExternalTotalItems, queryTotal)

	if errTotal != nil {
		zap.S().Debug("Error in processing sql query: ", errTotal)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	m := make(map[string]int)

	for j := range serviceExternalErrorItems {
		timeObj, _ := time.Parse(time.RFC3339Nano, serviceExternalErrorItems[j].Time)
		m[strconv.FormatInt(timeObj.UnixNano(), 10)+"-"+serviceExternalErrorItems[j].ExternalHttpUrl] = serviceExternalErrorItems[j].NumCalls
	}

	for i := range serviceExternalTotalItems {
		timeObj, _ := time.Parse(time.RFC3339Nano, serviceExternalTotalItems[i].Time)
		serviceExternalTotalItems[i].Timestamp = int64(timeObj.UnixNano())
		serviceExternalTotalItems[i].Time = ""
		// serviceExternalTotalItems[i].CallRate = float32(serviceExternalTotalItems[i].NumCalls) / float32(queryParams.StepSeconds)

		if val, ok := m[strconv.FormatInt(serviceExternalTotalItems[i].Timestamp, 10)+"-"+serviceExternalTotalItems[i].ExternalHttpUrl]; ok {
			serviceExternalTotalItems[i].NumErrors = val
			serviceExternalTotalItems[i].ErrorRate = float32(serviceExternalTotalItems[i].NumErrors) * 100 / float32(serviceExternalTotalItems[i].NumCalls)
		}
		serviceExternalTotalItems[i].CallRate = 0
		serviceExternalTotalItems[i].NumCalls = 0

	}

	if serviceExternalTotalItems == nil {
		serviceExternalTotalItems = []model.ServiceExternalItem{}
	}

	return &serviceExternalTotalItems, nil
}

func (r *ClickHouseReader) GetServiceExternal(ctx context.Context, queryParams *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {

	var serviceExternalItems []model.ServiceExternalItem

	query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %s minute) as time, avg(durationNano) as avgDuration, count(1) as numCalls, externalHttpUrl FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' AND kind='3' AND externalHttpUrl IS NOT NULL GROUP BY time, externalHttpUrl ORDER BY time DESC", strconv.Itoa(int(queryParams.StepSeconds/60)), r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))

	err := r.db.Select(&serviceExternalItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	for i := range serviceExternalItems {
		timeObj, _ := time.Parse(time.RFC3339Nano, serviceExternalItems[i].Time)
		serviceExternalItems[i].Timestamp = int64(timeObj.UnixNano())
		serviceExternalItems[i].Time = ""
		serviceExternalItems[i].CallRate = float32(serviceExternalItems[i].NumCalls) / float32(queryParams.StepSeconds)
	}

	if serviceExternalItems == nil {
		serviceExternalItems = []model.ServiceExternalItem{}
	}

	return &serviceExternalItems, nil
}

func (r *ClickHouseReader) GetTopEndpoints(ctx context.Context, queryParams *model.GetTopEndpointsParams) (*[]model.TopEndpointsItem, error) {

	var topEndpointsItems []model.TopEndpointsItem

	query := fmt.Sprintf("SELECT quantile(0.5)(durationNano) as p50, quantile(0.95)(durationNano) as p95, quantile(0.99)(durationNano) as p99, COUNT(1) as numCalls, name FROM %s WHERE timestamp >= '%s' AND timestamp <= '%s' AND kind='2' and serviceName='%s' GROUP BY name", r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10), queryParams.ServiceName)

	err := r.db.Select(&topEndpointsItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	if topEndpointsItems == nil {
		topEndpointsItems = []model.TopEndpointsItem{}
	}

	return &topEndpointsItems, nil
}

func (r *ClickHouseReader) GetUsage(ctx context.Context, queryParams *model.GetUsageParams) (*[]model.UsageItem, error) {

	var usageItems []model.UsageItem

	var query string
	if len(queryParams.ServiceName) != 0 {
		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d HOUR) as time, count(1) as count FROM %s WHERE serviceName='%s' AND timestamp>='%s' AND timestamp<='%s' GROUP BY time ORDER BY time ASC", queryParams.StepHour, r.indexTable, queryParams.ServiceName, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
	} else {
		query = fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d HOUR) as time, count(1) as count FROM %s WHERE timestamp>='%s' AND timestamp<='%s' GROUP BY time ORDER BY time ASC", queryParams.StepHour, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))
	}

	err := r.db.Select(&usageItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	for i := range usageItems {
		timeObj, _ := time.Parse(time.RFC3339Nano, usageItems[i].Time)
		usageItems[i].Timestamp = int64(timeObj.UnixNano())
		usageItems[i].Time = ""
	}

	if usageItems == nil {
		usageItems = []model.UsageItem{}
	}

	return &usageItems, nil
}

func (r *ClickHouseReader) GetServicesList(ctx context.Context) (*[]string, error) {

	services := []string{}

	query := fmt.Sprintf(`SELECT DISTINCT serviceName FROM %s WHERE toDate(timestamp) > now() - INTERVAL 1 DAY`, r.indexTable)

	err := r.db.Select(&services, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	return &services, nil
}

func (r *ClickHouseReader) GetTags(ctx context.Context, serviceName string) (*[]model.TagItem, error) {

	tagItems := []model.TagItem{}

	query := fmt.Sprintf(`SELECT DISTINCT arrayJoin(tagsKeys) as tagKeys FROM %s WHERE serviceName='%s' AND toDate(timestamp) > now() - INTERVAL 1 DAY`, r.indexTable, serviceName)

	err := r.db.Select(&tagItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	return &tagItems, nil
}

func (r *ClickHouseReader) GetOperations(ctx context.Context, serviceName string) (*[]string, error) {

	operations := []string{}

	query := fmt.Sprintf(`SELECT DISTINCT(name) FROM %s WHERE serviceName='%s' AND toDate(timestamp) > now() - INTERVAL 1 DAY`, r.indexTable, serviceName)

	err := r.db.Select(&operations, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}
	return &operations, nil
}

func (r *ClickHouseReader) SearchTraces(ctx context.Context, traceId string) (*[]model.SearchSpansResult, error) {

	var searchScanReponses []model.SearchSpanReponseItem

	query := fmt.Sprintf("SELECT timestamp, spanID, traceID, serviceName, name, kind, durationNano, tagsKeys, tagsValues, references FROM %s WHERE traceID='%s'", r.indexTable, traceId)

	err := r.db.Select(&searchScanReponses, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	searchSpansResult := []model.SearchSpansResult{
		model.SearchSpansResult{
			Columns: []string{"__time", "SpanId", "TraceId", "ServiceName", "Name", "Kind", "DurationNano", "TagsKeys", "TagsValues", "References"},
			Events:  make([][]interface{}, len(searchScanReponses)),
		},
	}

	for i, item := range searchScanReponses {
		spanEvents := item.GetValues()
		searchSpansResult[0].Events[i] = spanEvents
	}

	return &searchSpansResult, nil

}
func (r *ClickHouseReader) GetServiceMapDependencies(ctx context.Context, queryParams *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error) {
	serviceMapDependencyItems := []model.ServiceMapDependencyItem{}

	query := fmt.Sprintf(`SELECT spanID, parentSpanID, serviceName FROM %s WHERE timestamp>='%s' AND timestamp<='%s'`, r.indexTable, strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10))

	err := r.db.Select(&serviceMapDependencyItems, query)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	serviceMap := make(map[string]*model.ServiceMapDependencyResponseItem)

	spanId2ServiceNameMap := make(map[string]string)
	for i := range serviceMapDependencyItems {
		spanId2ServiceNameMap[serviceMapDependencyItems[i].SpanId] = serviceMapDependencyItems[i].ServiceName
	}
	for i := range serviceMapDependencyItems {
		parent2childServiceName := spanId2ServiceNameMap[serviceMapDependencyItems[i].ParentSpanId] + "-" + spanId2ServiceNameMap[serviceMapDependencyItems[i].SpanId]
		if _, ok := serviceMap[parent2childServiceName]; !ok {
			serviceMap[parent2childServiceName] = &model.ServiceMapDependencyResponseItem{
				Parent:    spanId2ServiceNameMap[serviceMapDependencyItems[i].ParentSpanId],
				Child:     spanId2ServiceNameMap[serviceMapDependencyItems[i].SpanId],
				CallCount: 1,
			}
		} else {
			serviceMap[parent2childServiceName].CallCount++
		}
	}

	retMe := make([]model.ServiceMapDependencyResponseItem, 0, len(serviceMap))
	for _, dependency := range serviceMap {
		if dependency.Parent == "" {
			continue
		}
		retMe = append(retMe, *dependency)
	}

	return &retMe, nil
}
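
A small worked example of the two-pass edge counting in GetServiceMapDependencies, using a hypothetical four-span trace; the local edge struct only mirrors the shape of model.ServiceMapDependencyResponseItem:

package main

import "fmt"

// edge mirrors model.ServiceMapDependencyResponseItem in shape; the real
// type lives in go.signoz.io/query-service/model.
type edge struct {
	Parent    string
	Child     string
	CallCount int
}

func main() {
	// Hypothetical span rows: (spanID, parentSpanID, serviceName).
	rows := [][3]string{
		{"a", "", "frontend"},
		{"b", "a", "cart"},
		{"c", "a", "cart"},
		{"d", "b", "redis"},
	}

	// First pass: spanID -> serviceName, as in GetServiceMapDependencies.
	svc := map[string]string{}
	for _, r := range rows {
		svc[r[0]] = r[2]
	}

	// Second pass: count parentService-childService edges.
	edges := map[string]*edge{}
	for _, r := range rows {
		key := svc[r[1]] + "-" + svc[r[0]]
		if e, ok := edges[key]; ok {
			e.CallCount++
		} else {
			edges[key] = &edge{Parent: svc[r[1]], Child: svc[r[0]], CallCount: 1}
		}
	}

	// Root spans have no parent service and are skipped, as in the reader.
	for _, e := range edges {
		if e.Parent == "" {
			continue
		}
		fmt.Printf("%s -> %s (%d calls)\n", e.Parent, e.Child, e.CallCount)
	}
}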
func (r *ClickHouseReader) SearchSpansAggregate(ctx context.Context, queryParams *model.SpanSearchAggregatesParams) ([]model.SpanSearchAggregatesResponseItem, error) {

	spanSearchAggregatesResponseItems := []model.SpanSearchAggregatesResponseItem{}

	aggregation_query := ""
	if queryParams.Dimension == "duration" {
		switch queryParams.AggregationOption {
		case "p50":
			aggregation_query = " quantile(0.50)(durationNano) as value "
		case "p95":
			aggregation_query = " quantile(0.95)(durationNano) as value "
		case "p99":
			aggregation_query = " quantile(0.99)(durationNano) as value "
		}
	} else if queryParams.Dimension == "calls" {
		aggregation_query = " count(*) as value "
	}

	query := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d minute) as time, %s FROM %s WHERE timestamp >= ? AND timestamp <= ?", queryParams.StepSeconds/60, aggregation_query, r.indexTable)

	args := []interface{}{strconv.FormatInt(queryParams.Start.UnixNano(), 10), strconv.FormatInt(queryParams.End.UnixNano(), 10)}

	if len(queryParams.ServiceName) != 0 {
		query = query + " AND serviceName = ?"
		args = append(args, queryParams.ServiceName)
	}

	if len(queryParams.OperationName) != 0 {
		query = query + " AND name = ?"
		args = append(args, queryParams.OperationName)
	}

	if len(queryParams.Kind) != 0 {
		query = query + " AND kind = ?"
		args = append(args, queryParams.Kind)
	}

	if len(queryParams.MinDuration) != 0 {
		query = query + " AND durationNano >= ?"
		args = append(args, queryParams.MinDuration)
	}
	if len(queryParams.MaxDuration) != 0 {
		query = query + " AND durationNano <= ?"
		args = append(args, queryParams.MaxDuration)
	}

	for _, item := range queryParams.Tags {

		if item.Key == "error" && item.Value == "true" {
			query = query + " AND ( has(tags, 'error:true') OR statusCode>=500)"
			continue
		}

		if item.Operator == "equals" {
			query = query + " AND has(tags, ?)"
			args = append(args, fmt.Sprintf("%s:%s", item.Key, item.Value))
		} else if item.Operator == "contains" {
			query = query + " AND tagsValues[indexOf(tagsKeys, ?)] ILIKE ?"
			args = append(args, item.Key)
			args = append(args, fmt.Sprintf("%%%s%%", item.Value))
		} else if item.Operator == "isnotnull" {
			query = query + " AND has(tagsKeys, ?)"
			args = append(args, item.Key)
		} else {
			return nil, fmt.Errorf("Tag Operator %s not supported", item.Operator)
		}

	}

	query = query + " GROUP BY time ORDER BY time"

	err := r.db.Select(&spanSearchAggregatesResponseItems, query, args...)

	zap.S().Info(query)

	if err != nil {
		zap.S().Debug("Error in processing sql query: ", err)
		return nil, fmt.Errorf("Error in processing sql query")
	}

	for i := range spanSearchAggregatesResponseItems {

		timeObj, _ := time.Parse(time.RFC3339Nano, spanSearchAggregatesResponseItems[i].Time)
		spanSearchAggregatesResponseItems[i].Timestamp = int64(timeObj.UnixNano())
		spanSearchAggregatesResponseItems[i].Time = ""
		if queryParams.AggregationOption == "rate_per_sec" {
			spanSearchAggregatesResponseItems[i].Value = float32(spanSearchAggregatesResponseItems[i].Value) / float32(queryParams.StepSeconds)
		}
	}

	return spanSearchAggregatesResponseItems, nil

}
99 pkg/query-service/app/druidReader/reader.go Normal file
@@ -0,0 +1,99 @@
package druidReader

import (
	"context"
	"os"

	"go.signoz.io/query-service/druidQuery"
	"go.signoz.io/query-service/godruid"
	"go.signoz.io/query-service/model"
)

type DruidReader struct {
	Client    *godruid.Client
	SqlClient *druidQuery.SqlClient
}

func NewReader() *DruidReader {

	initialize()
	druidClientUrl := os.Getenv("DruidClientUrl")

	client := godruid.Client{
		Url:   druidClientUrl,
		Debug: true,
	}

	sqlClient := druidQuery.SqlClient{
		Url:   druidClientUrl,
		Debug: true,
	}
	return &DruidReader{
		Client:    &client,
		SqlClient: &sqlClient,
	}

}

func initialize() {

}

func (druid *DruidReader) GetServiceOverview(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, error) {
	return druidQuery.GetServiceOverview(druid.SqlClient, query)
}

func (druid *DruidReader) GetServices(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceItem, error) {
	return druidQuery.GetServices(druid.SqlClient, query)
}

func (druid *DruidReader) SearchSpans(ctx context.Context, query *model.SpanSearchParams) (*[]model.SearchSpansResult, error) {
	return druidQuery.SearchSpans(druid.Client, query)
}

func (druid *DruidReader) GetServiceDBOverview(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceDBOverviewItem, error) {
	return druidQuery.GetServiceDBOverview(druid.SqlClient, query)
}

func (druid *DruidReader) GetServiceExternalAvgDuration(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
	return druidQuery.GetServiceExternalAvgDuration(druid.SqlClient, query)
}

func (druid *DruidReader) GetServiceExternalErrors(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
	return druidQuery.GetServiceExternalErrors(druid.SqlClient, query)
}

func (druid *DruidReader) GetServiceExternal(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {
	return druidQuery.GetServiceExternal(druid.SqlClient, query)
}

func (druid *DruidReader) GetTopEndpoints(ctx context.Context, query *model.GetTopEndpointsParams) (*[]model.TopEndpointsItem, error) {
	return druidQuery.GetTopEndpoints(druid.SqlClient, query)
}

func (druid *DruidReader) GetUsage(ctx context.Context, query *model.GetUsageParams) (*[]model.UsageItem, error) {
	return druidQuery.GetUsage(druid.SqlClient, query)
}

func (druid *DruidReader) GetOperations(ctx context.Context, serviceName string) (*[]string, error) {
	return druidQuery.GetOperations(druid.SqlClient, serviceName)
}

func (druid *DruidReader) GetTags(ctx context.Context, serviceName string) (*[]model.TagItem, error) {
	return druidQuery.GetTags(druid.SqlClient, serviceName)
}

func (druid *DruidReader) GetServicesList(ctx context.Context) (*[]string, error) {
	return druidQuery.GetServicesList(druid.SqlClient)
}

func (druid *DruidReader) SearchTraces(ctx context.Context, traceId string) (*[]model.SearchSpansResult, error) {
	return druidQuery.SearchTraces(druid.Client, traceId)
}

func (druid *DruidReader) GetServiceMapDependencies(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error) {
	return druidQuery.GetServiceMapDependencies(druid.SqlClient, query)
}
func (druid *DruidReader) SearchSpansAggregate(ctx context.Context, queryParams *model.SpanSearchAggregatesParams) ([]model.SpanSearchAggregatesResponseItem, error) {
	return druidQuery.SearchSpansAggregate(druid.Client, queryParams)
}
@@ -1,14 +1,13 @@
package app

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/posthog/posthog-go"
	"go.signoz.io/query-service/druidQuery"
	"go.signoz.io/query-service/godruid"
	"go.uber.org/zap"
)

@@ -23,17 +22,15 @@ type APIHandler struct {
	// queryParser queryParser
	basePath   string
	apiPrefix  string
	client     *godruid.Client
	sqlClient  *druidQuery.SqlClient
	reader     *Reader
	pc         *posthog.Client
	distinctId string
}

// NewAPIHandler returns an APIHandler
func NewAPIHandler(client *godruid.Client, sqlClient *druidQuery.SqlClient, pc *posthog.Client, distinctId string) *APIHandler {
func NewAPIHandler(reader *Reader, pc *posthog.Client, distinctId string) *APIHandler {
	aH := &APIHandler{
		client:     client,
		sqlClient:  sqlClient,
		reader:     reader,
		pc:         pc,
		distinctId: distinctId,
	}
@@ -58,8 +55,8 @@ type structuredError struct {
// RegisterRoutes registers routes for this handler on the given router
func (aH *APIHandler) RegisterRoutes(router *mux.Router) {

	router.HandleFunc("/api/v1/user", aH.user).Methods(http.MethodPost)
	router.HandleFunc("/api/v1/get_percentiles", aH.getApplicationPercentiles).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/user", aH.user).Methods(http.MethodGet)
	// router.HandleFunc("/api/v1/get_percentiles", aH.getApplicationPercentiles).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/services", aH.getServices).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/services/list", aH.getServicesList).Methods(http.MethodGet)
	router.HandleFunc("/api/v1/service/overview", aH.getServiceOverview).Methods(http.MethodGet)
@@ -94,12 +91,6 @@ func (aH *APIHandler) user(w http.ResponseWriter, r *http.Request) {
		Set("email", email),
	})

	_, err = http.Get(fmt.Sprintf("https://api.telegram.org/bot1518273960:AAHcgVvym9a0Qkl-PKiCI84X1VZaVbkTud0/sendMessage?chat_id=351813222&text=%s", email))

	if err != nil {
		zap.S().Debug(err)
	}

}

func (aH *APIHandler) getOperations(w http.ResponseWriter, r *http.Request) {
@@ -115,7 +106,7 @@ func (aH *APIHandler) getOperations(w http.ResponseWriter, r *http.Request) {
		return
	}

	result, err := druidQuery.GetOperations(aH.sqlClient, serviceName)
	result, err := (*aH.reader).GetOperations(context.Background(), serviceName)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -126,7 +117,7 @@ func (aH *APIHandler) getOperations(w http.ResponseWriter, r *http.Request) {

func (aH *APIHandler) getServicesList(w http.ResponseWriter, r *http.Request) {

	result, err := druidQuery.GetServicesList(aH.sqlClient)
	result, err := (*aH.reader).GetServicesList(context.Background())
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -139,7 +130,7 @@ func (aH *APIHandler) searchTags(w http.ResponseWriter, r *http.Request) {

	serviceName := r.URL.Query().Get("service")

	result, err := druidQuery.GetTags(aH.sqlClient, serviceName)
	result, err := (*aH.reader).GetTags(context.Background(), serviceName)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -155,7 +146,8 @@ func (aH *APIHandler) getTopEndpoints(w http.ResponseWriter, r *http.Request) {
		return
	}

	result, err := druidQuery.GetTopEndpoints(aH.sqlClient, query)
	result, err := (*aH.reader).GetTopEndpoints(context.Background(), query)

	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -171,7 +163,7 @@ func (aH *APIHandler) getUsage(w http.ResponseWriter, r *http.Request) {
		return
	}

	result, err := druidQuery.GetUsage(aH.sqlClient, query)
	result, err := (*aH.reader).GetUsage(context.Background(), query)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -187,7 +179,8 @@ func (aH *APIHandler) getServiceDBOverview(w http.ResponseWriter, r *http.Reques
		return
	}

	result, err := druidQuery.GetServiceDBOverview(aH.sqlClient, query)
	result, err := (*aH.reader).GetServiceDBOverview(context.Background(), query)

	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -203,7 +196,7 @@ func (aH *APIHandler) getServiceExternal(w http.ResponseWriter, r *http.Request)
		return
	}

	result, err := druidQuery.GetServiceExternal(aH.sqlClient, query)
	result, err := (*aH.reader).GetServiceExternal(context.Background(), query)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -219,7 +212,7 @@ func (aH *APIHandler) GetServiceExternalAvgDuration(w http.ResponseWriter, r *ht
		return
	}

	result, err := druidQuery.GetServiceExternalAvgDuration(aH.sqlClient, query)
	result, err := (*aH.reader).GetServiceExternalAvgDuration(context.Background(), query)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -235,7 +228,7 @@ func (aH *APIHandler) getServiceExternalErrors(w http.ResponseWriter, r *http.Re
		return
	}

	result, err := druidQuery.GetServiceExternalErrors(aH.sqlClient, query)
	result, err := (*aH.reader).GetServiceExternalErrors(context.Background(), query)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -251,7 +244,7 @@ func (aH *APIHandler) getServiceOverview(w http.ResponseWriter, r *http.Request)
		return
	}

	result, err := druidQuery.GetServiceOverview(aH.sqlClient, query)
	result, err := (*aH.reader).GetServiceOverview(context.Background(), query)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -267,7 +260,7 @@ func (aH *APIHandler) getServices(w http.ResponseWriter, r *http.Request) {
		return
	}

	result, err := druidQuery.GetServices(aH.sqlClient, query)
	result, err := (*aH.reader).GetServices(context.Background(), query)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -289,7 +282,7 @@ func (aH *APIHandler) serviceMapDependencies(w http.ResponseWriter, r *http.Requ
		return
	}

	result, err := druidQuery.GetServiceMapDependencies(aH.sqlClient, query)
	result, err := (*aH.reader).GetServiceMapDependencies(context.Background(), query)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -302,7 +295,7 @@ func (aH *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	traceId := vars["traceId"]

	result, err := druidQuery.SearchTraces(aH.client, traceId)
	result, err := (*aH.reader).SearchTraces(context.Background(), traceId)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -310,6 +303,7 @@ func (aH *APIHandler) searchTraces(w http.ResponseWriter, r *http.Request) {
	aH.writeJSON(w, r, result)

}

func (aH *APIHandler) searchSpansAggregates(w http.ResponseWriter, r *http.Request) {

	query, err := parseSearchSpanAggregatesRequest(r)
@@ -317,7 +311,7 @@ func (aH *APIHandler) searchSpansAggregates(w http.ResponseWriter, r *http.Reque
		return
	}

	result, err := druidQuery.SearchSpansAggregate(aH.client, query)
	result, err := (*aH.reader).SearchSpansAggregate(context.Background(), query)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -332,7 +326,9 @@ func (aH *APIHandler) searchSpans(w http.ResponseWriter, r *http.Request) {
		return
	}

	result, err := druidQuery.SearchSpans(aH.client, query)
	// result, err := druidQuery.SearchSpans(aH.client, query)
	result, err := (*aH.reader).SearchSpans(context.Background(), query)

	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
@@ -340,20 +336,20 @@ func (aH *APIHandler) searchSpans(w http.ResponseWriter, r *http.Request) {
	aH.writeJSON(w, r, result)
}

func (aH *APIHandler) getApplicationPercentiles(w http.ResponseWriter, r *http.Request) {
	// vars := mux.Vars(r)
// func (aH *APIHandler) getApplicationPercentiles(w http.ResponseWriter, r *http.Request) {
// 	// vars := mux.Vars(r)

	query, err := parseApplicationPercentileRequest(r)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
// 	query, err := parseApplicationPercentileRequest(r)
// 	if aH.handleError(w, err, http.StatusBadRequest) {
// 		return
// 	}

	result, err := druidQuery.GetApplicationPercentiles(aH.client, query)
	if aH.handleError(w, err, http.StatusBadRequest) {
		return
	}
	aH.writeJSON(w, r, result)
}
// 	result, err := (*aH.reader).GetApplicationPercentiles(context.Background(), query)
// 	if aH.handleError(w, err, http.StatusBadRequest) {
// 		return
// 	}
// 	aH.writeJSON(w, r, result)
// }

func (aH *APIHandler) handleError(w http.ResponseWriter, err error, statusCode int) bool {
	if err == nil {

26 pkg/query-service/app/interface.go Normal file
@@ -0,0 +1,26 @@
package app

import (
	"context"

	"go.signoz.io/query-service/model"
)

type Reader interface {
	GetServiceOverview(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, error)
	GetServices(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceItem, error)
	// GetApplicationPercentiles(ctx context.Context, query *model.ApplicationPercentileParams) ([]godruid.Timeseries, error)
	SearchSpans(ctx context.Context, query *model.SpanSearchParams) (*[]model.SearchSpansResult, error)
	GetServiceDBOverview(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceDBOverviewItem, error)
	GetServiceExternalAvgDuration(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error)
	GetServiceExternalErrors(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error)
	GetServiceExternal(ctx context.Context, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error)
	GetTopEndpoints(ctx context.Context, query *model.GetTopEndpointsParams) (*[]model.TopEndpointsItem, error)
	GetUsage(ctx context.Context, query *model.GetUsageParams) (*[]model.UsageItem, error)
	GetOperations(ctx context.Context, serviceName string) (*[]string, error)
	GetTags(ctx context.Context, serviceName string) (*[]model.TagItem, error)
	GetServicesList(ctx context.Context) (*[]string, error)
	SearchTraces(ctx context.Context, traceID string) (*[]model.SearchSpansResult, error)
	GetServiceMapDependencies(ctx context.Context, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error)
	SearchSpansAggregate(ctx context.Context, queryParams *model.SpanSearchAggregatesParams) ([]model.SpanSearchAggregatesResponseItem, error)
}
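
As an illustrative aside (not in the commit), both concrete readers can be checked against this interface at compile time with blank-identifier assertions, for example in a test file of package app; the import paths follow the ones used in server.go below:

package app

import (
	"go.signoz.io/query-service/app/clickhouseReader"
	"go.signoz.io/query-service/app/druidReader"
)

// Compile-time checks: both readers must satisfy Reader for the STORAGE
// switch in server.go to assign them to the interface variable.
var (
	_ Reader = (*clickhouseReader.ClickHouseReader)(nil)
	_ Reader = (*druidReader.DruidReader)(nil)
)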
@@ -16,7 +16,7 @@ var allowedDimesions = []string{"calls", "duration"}
|
||||
|
||||
var allowedAggregations = map[string][]string{
|
||||
"calls": []string{"count", "rate_per_sec"},
|
||||
"duration": []string{"avg", "p50", "p90", "p99"},
|
||||
"duration": []string{"avg", "p50", "p95", "p99"},
|
||||
}
|
||||
|
||||
func parseGetTopEndpointsRequest(r *http.Request) (*model.GetTopEndpointsParams, error) {
|
||||
@@ -38,6 +38,8 @@ func parseGetTopEndpointsRequest(r *http.Request) (*model.GetTopEndpointsParams,
|
||||
StartTime: startTime.Format(time.RFC3339Nano),
|
||||
EndTime: endTime.Format(time.RFC3339Nano),
|
||||
ServiceName: serviceName,
|
||||
Start: startTime,
|
||||
End: endTime,
|
||||
}
|
||||
|
||||
return &getTopEndpointsParams, nil
|
||||
@@ -64,12 +66,16 @@ func parseGetUsageRequest(r *http.Request) (*model.GetUsageParams, error) {
|
||||
}
|
||||
|
||||
serviceName := r.URL.Query().Get("service")
|
||||
stepHour := stepInt / 3600
|
||||
|
||||
getUsageParams := model.GetUsageParams{
|
||||
StartTime: startTime.Format(time.RFC3339Nano),
|
||||
EndTime: endTime.Format(time.RFC3339Nano),
|
||||
Start: startTime,
|
||||
End: endTime,
|
||||
ServiceName: serviceName,
|
||||
Period: fmt.Sprintf("PT%dH", stepInt/3600),
|
||||
Period: fmt.Sprintf("PT%dH", stepHour),
|
||||
StepHour: stepHour,
|
||||
}
|
||||
|
||||
return &getUsageParams, nil
|
||||
@@ -101,7 +107,9 @@ func parseGetServiceExternalRequest(r *http.Request) (*model.GetServiceOverviewP
|
||||
}
|
||||
|
||||
getServiceOverviewParams := model.GetServiceOverviewParams{
|
||||
Start: startTime,
|
||||
StartTime: startTime.Format(time.RFC3339Nano),
|
||||
End: endTime,
|
||||
EndTime: endTime.Format(time.RFC3339Nano),
|
||||
ServiceName: serviceName,
|
||||
Period: fmt.Sprintf("PT%dM", stepInt/60),
|
||||
@@ -137,7 +145,9 @@ func parseGetServiceOverviewRequest(r *http.Request) (*model.GetServiceOverviewP
|
||||
}
|
||||
|
||||
getServiceOverviewParams := model.GetServiceOverviewParams{
|
||||
Start: startTime,
|
||||
StartTime: startTime.Format(time.RFC3339Nano),
|
||||
End: endTime,
|
||||
EndTime: endTime.Format(time.RFC3339Nano),
|
||||
ServiceName: serviceName,
|
||||
Period: fmt.Sprintf("PT%dM", stepInt/60),
|
||||
@@ -160,7 +170,9 @@ func parseGetServicesRequest(r *http.Request) (*model.GetServicesParams, error)
|
||||
}
|
||||
|
||||
getServicesParams := model.GetServicesParams{
|
||||
Start: startTime,
|
||||
StartTime: startTime.Format(time.RFC3339Nano),
|
||||
End: endTime,
|
||||
EndTime: endTime.Format(time.RFC3339Nano),
|
||||
Period: int(endTime.Unix() - startTime.Unix()),
|
||||
}
|
||||
@@ -222,6 +234,8 @@ func parseSearchSpanAggregatesRequest(r *http.Request) (*model.SpanSearchAggrega
|
||||
}
|
||||
|
||||
params := &model.SpanSearchAggregatesParams{
|
||||
Start: startTime,
|
||||
End: endTime,
|
||||
Intervals: fmt.Sprintf("%s/%s", startTimeStr, endTimeStr),
|
||||
GranOrigin: startTimeStr,
|
||||
GranPeriod: granPeriod,
|
||||
@@ -283,6 +297,8 @@ func parseSpanSearchRequest(r *http.Request) (*model.SpanSearchParams, error) {
|
||||
// fmt.Println(startTimeStr)
|
||||
params := &model.SpanSearchParams{
|
||||
Intervals: fmt.Sprintf("%s/%s", startTimeStr, endTimeStr),
|
||||
Start: startTime,
|
||||
End: endTime,
|
||||
Limit: 100,
|
||||
Order: "descending",
|
||||
}
|
||||
|
||||
@@ -1,8 +1,10 @@
 package app

 import (
+	"fmt"
 	"net"
 	"net/http"
+	"os"
 	"time"

 	"github.com/google/uuid"
@@ -11,16 +13,16 @@ import (
 	"github.com/posthog/posthog-go"
 	"github.com/rs/cors"
 	"github.com/soheilhy/cmux"
-	"go.signoz.io/query-service/druidQuery"
-	"go.signoz.io/query-service/godruid"
+	"go.signoz.io/query-service/app/clickhouseReader"
+	"go.signoz.io/query-service/app/druidReader"
 	"go.signoz.io/query-service/healthcheck"
 	"go.signoz.io/query-service/utils"
 	"go.uber.org/zap"
 )

 type ServerOptions struct {
-	HTTPHostPort   string
-	DruidClientUrl string
+	HTTPHostPort string
+	// DruidClientUrl string
 }

 // Server runs HTTP, Mux and a grpc server
@@ -28,11 +30,10 @@ type Server struct {
 	// logger *zap.Logger
 	// querySvc *querysvc.QueryService
 	// queryOptions *QueryOptions
-	serverOptions *ServerOptions
-
-	// tracer opentracing.Tracer // TODO make part of flags.Service
-
-	conn net.Listener
+	serverOptions *ServerOptions
+	conn          net.Listener
 	// grpcConn net.Listener
 	httpConn net.Listener
 	// grpcServer *grpc.Server
@@ -64,6 +65,11 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 	// if err != nil {
 	// 	return nil, err
 	// }
+	httpServer, err := createHTTPServer()
+
+	if err != nil {
+		return nil, err
+	}

 	return &Server{
 		// logger: logger,
@@ -72,7 +78,7 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 		// tracer: tracer,
 		// grpcServer: grpcServer,
 		serverOptions: serverOptions,
-		httpServer:    createHTTPServer(serverOptions.DruidClientUrl),
+		httpServer:    httpServer,
 		separatePorts: true,
 		// separatePorts: grpcPort != httpPort,
 		unavailableChannel: make(chan healthcheck.Status),
@@ -82,22 +88,25 @@ func NewServer(serverOptions *ServerOptions) (*Server, error) {
 var posthogClient posthog.Client
 var distinctId string

-func createHTTPServer(druidClientUrl string) *http.Server {
+func createHTTPServer() (*http.Server, error) {

 	posthogClient = posthog.New("H-htDCae7CR3RV57gUzmol6IAKtm5IMCvbcm_fwnL-w")
 	distinctId = uuid.New().String()

-	client := godruid.Client{
-		Url:   druidClientUrl,
-		Debug: true,
-	}
+	var reader Reader
+
+	storage := os.Getenv("STORAGE")
+	if storage == "druid" {
+		zap.S().Info("Using Apache Druid as datastore ...")
+		reader = druidReader.NewReader()
+	} else if storage == "clickhouse" {
+		zap.S().Info("Using ClickHouse as datastore ...")
+		reader = clickhouseReader.NewReader()
+	} else {
+		return nil, fmt.Errorf("Storage type: %s is not supported in query service", storage)
+	}

-	sqlClient := druidQuery.SqlClient{
-		Url:   druidClientUrl,
-		Debug: true,
-	}

-	apiHandler := NewAPIHandler(&client, &sqlClient, &posthogClient, distinctId)
+	apiHandler := NewAPIHandler(&reader, &posthogClient, distinctId)
 	r := NewRouter()

 	r.Use(analyticsMiddleware)
@@ -118,7 +127,7 @@ func createHTTPServer(druidClientUrl string) *http.Server {

 	return &http.Server{
 		Handler: handler,
-	}
+	}, nil
 }

 func loggingMiddleware(next http.Handler) http.Handler {
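createHTTPServer now picks the datastore from the STORAGE environment variable and hands NewAPIHandler a Reader value, which implies druidReader and clickhouseReader expose a common interface. Its method set is not visible in this diff; the sketch below guesses at it from the druidQuery functions that follow, and the import path is assumed from the module name:

package app

import "go.signoz.io/query-service/model"

// Hypothetical Reader interface; the actual definition is not part of this diff.
type Reader interface {
	GetServices(query *model.GetServicesParams) (*[]model.ServiceItem, error)
	GetTopEndpoints(query *model.GetTopEndpointsParams) (*[]model.TopEndpointsItem, error)
	GetUsage(query *model.GetUsageParams) (*[]model.UsageItem, error)
	SearchSpans(query *model.SpanSearchParams) (*[]model.SearchSpansResult, error)
}

Choosing the implementation once at startup keeps the HTTP handlers datastore-agnostic; a third backend would only add one more case to the STORAGE switch.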
@@ -11,92 +11,6 @@ import (
 	"go.uber.org/zap"
 )

-type ServiceItem struct {
-	ServiceName  string  `json:"serviceName"`
-	Percentile99 float32 `json:"p99"`
-	AvgDuration  float32 `json:"avgDuration"`
-	NumCalls     int     `json:"numCalls"`
-	CallRate     float32 `json:"callRate"`
-	NumErrors    int     `json:"numErrors"`
-	ErrorRate    float32 `json:"errorRate"`
-	Num4XX       int     `json:"num4XX"`
-	FourXXRate   float32 `json:"fourXXRate"`
-}
-type ServiceListErrorItem struct {
-	ServiceName string `json:"serviceName"`
-	NumErrors   int    `json:"numErrors"`
-	Num4xx      int    `json:"num4xx"`
-}
-
-type ServiceErrorItem struct {
-	Time      string `json:"time,omitempty"`
-	Timestamp int64  `json:"timestamp"`
-	NumErrors int    `json:"numErrors"`
-}
-
-type ServiceOverviewItem struct {
-	Time         string  `json:"time,omitempty"`
-	Timestamp    int64   `json:"timestamp"`
-	Percentile50 float32 `json:"p50"`
-	Percentile95 float32 `json:"p95"`
-	Percentile99 float32 `json:"p99"`
-	NumCalls     int     `json:"numCalls"`
-	CallRate     float32 `json:"callRate"`
-	NumErrors    int     `json:"numErrors"`
-	ErrorRate    float32 `json:"errorRate"`
-}
-
-type ServiceExternalItem struct {
-	Time            string  `json:"time,omitempty"`
-	Timestamp       int64   `json:"timestamp,omitempty"`
-	ExternalHttpUrl string  `json:"externalHttpUrl,omitempty"`
-	AvgDuration     float32 `json:"avgDuration,omitempty"`
-	NumCalls        int     `json:"numCalls,omitempty"`
-	CallRate        float32 `json:"callRate,omitempty"`
-	NumErrors       int     `json:"numErrors"`
-	ErrorRate       float32 `json:"errorRate"`
-}
-
-type ServiceDBOverviewItem struct {
-	Time        string  `json:"time,omitempty"`
-	Timestamp   int64   `json:"timestamp,omitempty"`
-	DBSystem    string  `json:"dbSystem,omitempty"`
-	AvgDuration float32 `json:"avgDuration,omitempty"`
-	NumCalls    int     `json:"numCalls,omitempty"`
-	CallRate    float32 `json:"callRate,omitempty"`
-}
-
-type ServiceMapDependencyItem struct {
-	SpanId       string `json:"spanId,omitempty"`
-	ParentSpanId string `json:"parentSpanId,omitempty"`
-	ServiceName  string `json:"serviceName,omitempty"`
-}
-
-type UsageItem struct {
-	Time      string `json:"time,omitempty"`
-	Timestamp int64  `json:"timestamp"`
-	Count     int64  `json:"count"`
-}
-
-type TopEnpointsItem struct {
-	Percentile50 float32 `json:"p50"`
-	Percentile90 float32 `json:"p90"`
-	Percentile99 float32 `json:"p99"`
-	NumCalls     int     `json:"numCalls"`
-	Name         string  `json:"name"`
-}
-
-type TagItem struct {
-	TagKeys  string `json:"tagKeys"`
-	TagCount int    `json:"tagCount"`
-}
-
-type ServiceMapDependencyResponseItem struct {
-	Parent    string `json:"parent,omitempty"`
-	Child     string `json:"child,omitempty"`
-	CallCount int    `json:"callCount,omitempty"`
-}
-
 func GetOperations(client *SqlClient, serviceName string) (*[]string, error) {

 	sqlQuery := fmt.Sprintf(`SELECT DISTINCT(Name) FROM %s WHERE ServiceName='%s' AND __time > CURRENT_TIMESTAMP - INTERVAL '1' DAY`, constants.DruidDatasource, serviceName)
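The 86 lines removed here are the response structs migrating out of the druidQuery package into a shared model package, so both datastore readers can return the same JSON shapes. ServiceItem, for example, presumably now reads as follows in model (fields copied verbatim from the deleted block above):

package model

type ServiceItem struct {
	ServiceName  string  `json:"serviceName"`
	Percentile99 float32 `json:"p99"`
	AvgDuration  float32 `json:"avgDuration"`
	NumCalls     int     `json:"numCalls"`
	CallRate     float32 `json:"callRate"`
	NumErrors    int     `json:"numErrors"`
	ErrorRate    float32 `json:"errorRate"`
	Num4XX       int     `json:"num4XX"`
	FourXXRate   float32 `json:"fourXXRate"`
}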
@@ -155,7 +69,7 @@ func GetServicesList(client *SqlClient) (*[]string, error) {
 	return &servicesListReponse, nil
 }

-func GetTags(client *SqlClient, serviceName string) (*[]TagItem, error) {
+func GetTags(client *SqlClient, serviceName string) (*[]model.TagItem, error) {

 	var sqlQuery string

@@ -176,7 +90,7 @@ func GetTags(client *SqlClient, serviceName string) (*[]TagItem, error) {

 	// zap.S().Info(string(response))

-	res := new([]TagItem)
+	res := new([]model.TagItem)
 	err = json.Unmarshal(response, res)
 	if err != nil {
 		zap.S().Error(err)
@@ -187,9 +101,9 @@ func GetTags(client *SqlClient, serviceName string) (*[]TagItem, error) {
 	return &tagResponse, nil
 }

-func GetTopEndpoints(client *SqlClient, query *model.GetTopEndpointsParams) (*[]TopEnpointsItem, error) {
+func GetTopEndpoints(client *SqlClient, query *model.GetTopEndpointsParams) (*[]model.TopEndpointsItem, error) {

-	sqlQuery := fmt.Sprintf(`SELECT APPROX_QUANTILE_DS("QuantileDuration", 0.5) as p50, APPROX_QUANTILE_DS("QuantileDuration", 0.9) as p90, APPROX_QUANTILE_DS("QuantileDuration", 0.99) as p99, COUNT(SpanId) as numCalls, Name FROM "%s" WHERE "__time" >= '%s' AND "__time" <= '%s' AND "Kind"='2' and "ServiceName"='%s' GROUP BY Name`, constants.DruidDatasource, query.StartTime, query.EndTime, query.ServiceName)
+	sqlQuery := fmt.Sprintf(`SELECT APPROX_QUANTILE_DS("QuantileDuration", 0.5) as p50, APPROX_QUANTILE_DS("QuantileDuration", 0.95) as p95, APPROX_QUANTILE_DS("QuantileDuration", 0.99) as p99, COUNT(SpanId) as numCalls, Name FROM "%s" WHERE "__time" >= '%s' AND "__time" <= '%s' AND "Kind"='2' and "ServiceName"='%s' GROUP BY Name`, constants.DruidDatasource, query.StartTime, query.EndTime, query.ServiceName)

 	// zap.S().Debug(sqlQuery)

@@ -202,7 +116,7 @@ func GetTopEndpoints(client *SqlClient, query *model.GetTopEndpointsParams) (*[]

 	// zap.S().Info(string(response))

-	res := new([]TopEnpointsItem)
+	res := new([]model.TopEndpointsItem)
 	err = json.Unmarshal(response, res)
 	if err != nil {
 		zap.S().Error(err)
@@ -213,7 +127,7 @@ func GetTopEndpoints(client *SqlClient, query *model.GetTopEndpointsParams) (*[]
 	return &topEnpointsResponse, nil
 }
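Every reader function in this file repeats one round trip: format a Druid SQL string, call client.Query(sqlQuery, "object"), and unmarshal the returned JSON rows into a model slice via res := new([]T). A self-contained illustration of that unmarshalling step (row values invented for the example):

package main

import (
	"encoding/json"
	"fmt"
)

type TopEndpointsItem struct {
	P95      float64 `json:"p95"`
	NumCalls int     `json:"numCalls"`
	Name     string  `json:"name"`
}

func main() {
	// Shape of the "object"-formatted rows Druid SQL returns for the
	// GetTopEndpoints query above (values are illustrative).
	response := []byte(`[{"p95": 1250000.0, "numCalls": 42, "name": "/api/checkout"}]`)

	res := new([]TopEndpointsItem)
	if err := json.Unmarshal(response, res); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", (*res)[0])
}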
-func GetUsage(client *SqlClient, query *model.GetUsageParams) (*[]UsageItem, error) {
+func GetUsage(client *SqlClient, query *model.GetUsageParams) (*[]model.UsageItem, error) {

 	var sqlQuery string

@@ -236,7 +150,7 @@ func GetUsage(client *SqlClient, query *model.GetUsageParams) (*[]UsageItem, err

 	// zap.S().Info(string(response))

-	res := new([]UsageItem)
+	res := new([]model.UsageItem)
 	err = json.Unmarshal(response, res)
 	if err != nil {
 		zap.S().Error(err)
@@ -253,7 +167,7 @@ func GetUsage(client *SqlClient, query *model.GetUsageParams) (*[]UsageItem, err
 	return &usageResponse, nil
 }

-func GetServiceExternalAvgDuration(client *SqlClient, query *model.GetServiceOverviewParams) (*[]ServiceExternalItem, error) {
+func GetServiceExternalAvgDuration(client *SqlClient, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {

 	sqlQuery := fmt.Sprintf(`SELECT TIME_FLOOR(__time, '%s') as "time", AVG(DurationNano) as "avgDuration" FROM %s WHERE ServiceName='%s' AND Kind='3' AND ExternalHttpUrl != '' AND "__time" >= '%s' AND "__time" <= '%s'
 	GROUP BY TIME_FLOOR(__time, '%s')`, query.Period, constants.DruidDatasource, query.ServiceName, query.StartTime, query.EndTime, query.Period)
@@ -270,7 +184,7 @@ func GetServiceExternalAvgDuration(client *SqlClient, query *model.GetServiceOve
 	// responseStr := string(response)
 	// zap.S().Info(responseStr)

-	res := new([]ServiceExternalItem)
+	res := new([]model.ServiceExternalItem)
 	err = json.Unmarshal(response, res)
 	if err != nil {
 		zap.S().Error(err)
@@ -289,7 +203,7 @@ func GetServiceExternalAvgDuration(client *SqlClient, query *model.GetServiceOve
 	return &servicesExternalResponse, nil
 }

-func GetServiceExternalErrors(client *SqlClient, query *model.GetServiceOverviewParams) (*[]ServiceExternalItem, error) {
+func GetServiceExternalErrors(client *SqlClient, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {

 	sqlQuery := fmt.Sprintf(`SELECT TIME_FLOOR(__time, '%s') as "time", COUNT(SpanId) as "numCalls", ExternalHttpUrl as externalHttpUrl FROM %s WHERE ServiceName='%s' AND Kind='3' AND ExternalHttpUrl != '' AND StatusCode >= 500 AND "__time" >= '%s' AND "__time" <= '%s'
 	GROUP BY TIME_FLOOR(__time, '%s'), ExternalHttpUrl`, query.Period, constants.DruidDatasource, query.ServiceName, query.StartTime, query.EndTime, query.Period)
@@ -306,7 +220,7 @@ func GetServiceExternalErrors(client *SqlClient, query *model.GetServiceOverview
 	// responseStr := string(response)
 	// zap.S().Info(responseStr)

-	res := new([]ServiceExternalItem)
+	res := new([]model.ServiceExternalItem)
 	err = json.Unmarshal(response, res)
 	if err != nil {
 		zap.S().Error(err)
@@ -328,7 +242,7 @@ func GetServiceExternalErrors(client *SqlClient, query *model.GetServiceOverview
 	// responseStr := string(response)
 	// zap.S().Info(responseStr)

-	resTotal := new([]ServiceExternalItem)
+	resTotal := new([]model.ServiceExternalItem)
 	err = json.Unmarshal(responseTotal, resTotal)
 	if err != nil {
 		zap.S().Error(err)
@@ -361,7 +275,7 @@ func GetServiceExternalErrors(client *SqlClient, query *model.GetServiceOverview
 	return &servicesExternalResponse, nil
 }

-func GetServiceExternal(client *SqlClient, query *model.GetServiceOverviewParams) (*[]ServiceExternalItem, error) {
+func GetServiceExternal(client *SqlClient, query *model.GetServiceOverviewParams) (*[]model.ServiceExternalItem, error) {

 	sqlQuery := fmt.Sprintf(`SELECT TIME_FLOOR(__time, '%s') as "time", AVG(DurationNano) as "avgDuration", COUNT(SpanId) as "numCalls", ExternalHttpUrl as externalHttpUrl FROM %s WHERE ServiceName='%s' AND Kind='3' AND ExternalHttpUrl != ''
 	AND "__time" >= '%s' AND "__time" <= '%s'
@@ -379,7 +293,7 @@ func GetServiceExternal(client *SqlClient, query *model.GetServiceOverviewParams
 	// responseStr := string(response)
 	// zap.S().Info(responseStr)

-	res := new([]ServiceExternalItem)
+	res := new([]model.ServiceExternalItem)
 	err = json.Unmarshal(response, res)
 	if err != nil {
 		zap.S().Error(err)
@@ -398,7 +312,7 @@ func GetServiceExternal(client *SqlClient, query *model.GetServiceOverviewParams
 	return &servicesExternalResponse, nil
 }

-func GetServiceDBOverview(client *SqlClient, query *model.GetServiceOverviewParams) (*[]ServiceDBOverviewItem, error) {
+func GetServiceDBOverview(client *SqlClient, query *model.GetServiceOverviewParams) (*[]model.ServiceDBOverviewItem, error) {

 	sqlQuery := fmt.Sprintf(`SELECT TIME_FLOOR(__time, '%s') as "time", AVG(DurationNano) as "avgDuration", COUNT(SpanId) as "numCalls", DBSystem as "dbSystem" FROM %s WHERE ServiceName='%s' AND Kind='3' AND DBName IS NOT NULL
 	AND "__time" >= '%s' AND "__time" <= '%s'
@@ -416,7 +330,7 @@ func GetServiceDBOverview(client *SqlClient, query *model.GetServiceOverviewPara
 	// responseStr := string(response)
 	// zap.S().Info(responseStr)

-	res := new([]ServiceDBOverviewItem)
+	res := new([]model.ServiceDBOverviewItem)
 	err = json.Unmarshal(response, res)
 	if err != nil {
 		zap.S().Error(err)
@@ -435,7 +349,7 @@ func GetServiceDBOverview(client *SqlClient, query *model.GetServiceOverviewPara
 	return &servicesDBOverviewResponse, nil
 }

-func GetServiceOverview(client *SqlClient, query *model.GetServiceOverviewParams) (*[]ServiceOverviewItem, error) {
+func GetServiceOverview(client *SqlClient, query *model.GetServiceOverviewParams) (*[]model.ServiceOverviewItem, error) {

 	sqlQuery := fmt.Sprintf(`SELECT TIME_FLOOR(__time, '%s') as "time", APPROX_QUANTILE_DS("QuantileDuration", 0.5) as p50, APPROX_QUANTILE_DS("QuantileDuration", 0.95) as p95,
 	APPROX_QUANTILE_DS("QuantileDuration", 0.99) as p99, COUNT("SpanId") as "numCalls" FROM "%s" WHERE "__time" >= '%s' and "__time" <= '%s' and "Kind"='2' and "ServiceName"='%s' GROUP BY TIME_FLOOR(__time, '%s') `, query.Period, constants.DruidDatasource, query.StartTime, query.EndTime, query.ServiceName, query.Period)
@@ -451,7 +365,7 @@ func GetServiceOverview(client *SqlClient, query *model.GetServiceOverviewParams

 	// zap.S().Info(string(response))

-	res := new([]ServiceOverviewItem)
+	res := new([]model.ServiceOverviewItem)
 	err = json.Unmarshal(response, res)
 	if err != nil {
 		zap.S().Error(err)
@@ -471,7 +385,7 @@ func GetServiceOverview(client *SqlClient, query *model.GetServiceOverviewParams

 	// zap.S().Info(string(response))

-	resError := new([]ServiceErrorItem)
+	resError := new([]model.ServiceErrorItem)
 	err = json.Unmarshal(responseError, resError)
 	if err != nil {
 		zap.S().Error(err)
@@ -501,7 +415,7 @@ func GetServiceOverview(client *SqlClient, query *model.GetServiceOverviewParams
 	return &servicesOverviewResponse, nil
 }
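GetServiceOverview issues one query for latency percentiles and call counts and a second for error counts; the lines where the two series are merged are collapsed out of this diff, so the following is only a guessed sketch of that merge, keyed on the shared time bucket:

package main

import "fmt"

type overviewItem struct {
	Timestamp int64
	NumCalls  int
	NumErrors int
	ErrorRate float32
}

type errorItem struct {
	Timestamp int64
	NumErrors int
}

func main() {
	overview := []overviewItem{{Timestamp: 1000, NumCalls: 200}, {Timestamp: 2000, NumCalls: 100}}
	errs := []errorItem{{Timestamp: 2000, NumErrors: 5}}

	// Index error counts by time bucket, then fold them into the overview rows.
	errByTime := make(map[int64]int, len(errs))
	for _, e := range errs {
		errByTime[e.Timestamp] = e.NumErrors
	}
	for i := range overview {
		n := errByTime[overview[i].Timestamp]
		overview[i].NumErrors = n
		if overview[i].NumCalls > 0 {
			overview[i].ErrorRate = float32(n) / float32(overview[i].NumCalls)
		}
	}
	fmt.Printf("%+v\n", overview)
}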
-func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]ServiceItem, error) {
+func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]model.ServiceItem, error) {

 	sqlQuery := fmt.Sprintf(`SELECT APPROX_QUANTILE_DS("QuantileDuration", 0.99) as "p99", AVG("DurationNano") as "avgDuration", COUNT(SpanId) as numCalls, "ServiceName" as "serviceName" FROM %s WHERE "__time" >= '%s' and "__time" <= '%s' and "Kind"='2' GROUP BY "ServiceName" ORDER BY "p99" DESC`, constants.DruidDatasource, query.StartTime, query.EndTime)

@@ -516,7 +430,7 @@ func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]ServiceI

 	// zap.S().Info(string(response))

-	res := new([]ServiceItem)
+	res := new([]model.ServiceItem)
 	err = json.Unmarshal(response, res)
 	if err != nil {
 		zap.S().Error(err)
@@ -538,7 +452,7 @@ func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]ServiceI

 	// zap.S().Info(string(response))

-	resError := new([]ServiceListErrorItem)
+	resError := new([]model.ServiceListErrorItem)
 	err = json.Unmarshal(responseError, resError)
 	if err != nil {
 		zap.S().Error(err)
@@ -555,7 +469,7 @@ func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]ServiceI

 	////////////////// Below block gets 4xx of services

-	sqlQuery = fmt.Sprintf(`SELECT COUNT(SpanId) as numErrors, "ServiceName" as "serviceName" FROM %s WHERE "__time" >= '%s' and "__time" <= '%s' and "Kind"='2' and "StatusCode">=400 and "StatusCode" < 500 GROUP BY "ServiceName"`, constants.DruidDatasource, query.StartTime, query.EndTime)
+	sqlQuery = fmt.Sprintf(`SELECT COUNT(SpanId) as num4xx, "ServiceName" as "serviceName" FROM %s WHERE "__time" >= '%s' and "__time" <= '%s' and "Kind"='2' and "StatusCode">=400 and "StatusCode" < 500 GROUP BY "ServiceName"`, constants.DruidDatasource, query.StartTime, query.EndTime)

 	response4xx, err := client.Query(sqlQuery, "object")

@@ -568,7 +482,7 @@ func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]ServiceI

 	// zap.S().Info(string(response))

-	res4xx := new([]ServiceListErrorItem)
+	res4xx := new([]model.ServiceListErrorItem)
 	err = json.Unmarshal(response4xx, res4xx)
 	if err != nil {
 		zap.S().Error(err)
@@ -601,7 +515,7 @@ func GetServices(client *SqlClient, query *model.GetServicesParams) (*[]ServiceI
 	return &servicesResponse, nil
 }

-func GetServiceMapDependencies(client *SqlClient, query *model.GetServicesParams) (*[]ServiceMapDependencyResponseItem, error) {
+func GetServiceMapDependencies(client *SqlClient, query *model.GetServicesParams) (*[]model.ServiceMapDependencyResponseItem, error) {

 	sqlQuery := fmt.Sprintf(`SELECT SpanId, ParentSpanId, ServiceName FROM %s WHERE "__time" >= '%s' AND "__time" <= '%s' ORDER BY __time DESC LIMIT 100000`, constants.DruidDatasource, query.StartTime, query.EndTime)

@@ -617,7 +531,7 @@ func GetServiceMapDependencies(client *SqlClient, query *model.GetServicesParams
 	// responseStr := string(response)
 	// zap.S().Info(responseStr)

-	res := new([]ServiceMapDependencyItem)
+	res := new([]model.ServiceMapDependencyItem)
 	err = json.Unmarshal(response, res)
 	if err != nil {
 		zap.S().Error(err)
@@ -626,7 +540,7 @@ func GetServiceMapDependencies(client *SqlClient, query *model.GetServicesParams
 	// resCount := len(*res)
 	// fmt.Println(resCount)

-	serviceMap := make(map[string]*ServiceMapDependencyResponseItem)
+	serviceMap := make(map[string]*model.ServiceMapDependencyResponseItem)

 	spanId2ServiceNameMap := make(map[string]string)
 	for i, _ := range *res {
@@ -635,7 +549,7 @@ func GetServiceMapDependencies(client *SqlClient, query *model.GetServicesParams
 	for i, _ := range *res {
 		parent2childServiceName := spanId2ServiceNameMap[(*res)[i].ParentSpanId] + "-" + spanId2ServiceNameMap[(*res)[i].SpanId]
 		if _, ok := serviceMap[parent2childServiceName]; !ok {
-			serviceMap[parent2childServiceName] = &ServiceMapDependencyResponseItem{
+			serviceMap[parent2childServiceName] = &model.ServiceMapDependencyResponseItem{
 				Parent:    spanId2ServiceNameMap[(*res)[i].ParentSpanId],
 				Child:     spanId2ServiceNameMap[(*res)[i].SpanId],
 				CallCount: 1,
@@ -645,7 +559,7 @@ func GetServiceMapDependencies(client *SqlClient, query *model.GetServicesParams
 		}
 	}

-	retMe := make([]ServiceMapDependencyResponseItem, 0, len(serviceMap))
+	retMe := make([]model.ServiceMapDependencyResponseItem, 0, len(serviceMap))
 	for _, dependency := range serviceMap {
 		if dependency.Parent == "" {
 			continue
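GetServiceMapDependencies builds the service graph in two passes: index spanId to serviceName, then count each parent-service to child-service edge, skipping edges with no resolvable parent. A self-contained distillation of that aggregation (types trimmed to the fields the loops touch):

package main

import "fmt"

type span struct{ SpanID, ParentSpanID, ServiceName string }

type edge struct {
	Parent    string
	Child     string
	CallCount int
}

func buildServiceMap(spans []span) []edge {
	// Pass 1: resolve every span id to its service name.
	svcBySpan := make(map[string]string, len(spans))
	for _, s := range spans {
		svcBySpan[s.SpanID] = s.ServiceName
	}

	// Pass 2: count parent-service -> child-service calls.
	edges := make(map[string]*edge)
	for _, s := range spans {
		parent := svcBySpan[s.ParentSpanID]
		if parent == "" {
			continue // root spans have no parent service
		}
		key := parent + "-" + s.ServiceName
		if e, ok := edges[key]; ok {
			e.CallCount++
		} else {
			edges[key] = &edge{Parent: parent, Child: s.ServiceName, CallCount: 1}
		}
	}

	out := make([]edge, 0, len(edges))
	for _, e := range edges {
		out = append(out, *e)
	}
	return out
}

func main() {
	spans := []span{
		{SpanID: "a", ServiceName: "frontend"},
		{SpanID: "b", ParentSpanID: "a", ServiceName: "checkout"},
		{SpanID: "c", ParentSpanID: "a", ServiceName: "checkout"},
	}
	fmt.Printf("%+v\n", buildServiceMap(spans)) // [{Parent:frontend Child:checkout CallCount:2}]
}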
@@ -27,11 +27,6 @@ type SpanSearchAggregatesDuratonReceivedItem struct {
 	Result DurationItem `json:"result"`
 }

-type SpanSearchAggregatesResponseItem struct {
-	Timestamp int64   `json:"timestamp"`
-	Value     float32 `json:"value"`
-}
-
 func buildFilters(queryParams *model.SpanSearchParams) (*godruid.Filter, error) {

 	var filter *godruid.Filter
@@ -181,7 +176,7 @@ func buildFiltersForSpansAggregates(queryParams *model.SpanSearchAggregatesParam

 }

-func SearchTraces(client *godruid.Client, traceId string) ([]godruid.ScanResult, error) {
+func SearchTraces(client *godruid.Client, traceId string) (*[]model.SearchSpansResult, error) {

 	filter := godruid.FilterSelector("TraceId", traceId)

@@ -206,10 +201,20 @@ func SearchTraces(client *godruid.Client, traceId string) ([]godruid.ScanResult,

 	// fmt.Printf("query.QueryResult:\n%v", query.QueryResult)

-	return query.QueryResult, nil
+	var searchSpansResult []model.SearchSpansResult
+	searchSpansResult = make([]model.SearchSpansResult, len(query.QueryResult))
+
+	searchSpansResult[0].Columns = make([]string, len(query.QueryResult[0].Columns))
+	copy(searchSpansResult[0].Columns, query.QueryResult[0].Columns)
+
+	searchSpansResult[0].Events = make([][]interface{}, len(query.QueryResult[0].Events))
+	copy(searchSpansResult[0].Events, query.QueryResult[0].Events)
+
+	return &searchSpansResult, nil

 }
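One caveat in the new SearchTraces (and in SearchSpans further down): the result slice is sized to len(query.QueryResult), yet only index 0 is populated, and the [0] access panics outright on an empty result. If multiple scan results are possible, a loop is the safer shape; a sketch, not what the commit does:

	// Sketch: copy every scan result, not just the first (types as in the diff).
	searchSpansResult := make([]model.SearchSpansResult, len(query.QueryResult))
	for i, result := range query.QueryResult {
		searchSpansResult[i].Columns = make([]string, len(result.Columns))
		copy(searchSpansResult[i].Columns, result.Columns)
		searchSpansResult[i].Events = make([][]interface{}, len(result.Events))
		copy(searchSpansResult[i].Events, result.Events)
	}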
-func SearchSpansAggregate(client *godruid.Client, queryParams *model.SpanSearchAggregatesParams) ([]SpanSearchAggregatesResponseItem, error) {
+func SearchSpansAggregate(client *godruid.Client, queryParams *model.SpanSearchAggregatesParams) ([]model.SpanSearchAggregatesResponseItem, error) {

 	filter, err := buildFiltersForSpansAggregates(queryParams)
 	var needsPostAggregation bool = true
@@ -235,10 +240,10 @@ func SearchSpansAggregate(client *godruid.Client, queryParams *model.SpanSearchA
 		postAggregationString := `{"type":"quantilesDoublesSketchToQuantile","name":"value","field":{"type":"fieldAccess","fieldName":"quantile_agg"},"fraction":0.5}`
 		postAggregation = godruid.PostAggRawJson(postAggregationString)
 		break
-	case "p90":
+	case "p95":
 		aggregationString := `{ "type": "quantilesDoublesSketch", "fieldName": "QuantileDuration", "name": "quantile_agg", "k": 128}`
 		aggregation = godruid.AggRawJson(aggregationString)
-		postAggregationString := `{"type":"quantilesDoublesSketchToQuantile","name":"value","field":{"type":"fieldAccess","fieldName":"quantile_agg"},"fraction":0.9}`
+		postAggregationString := `{"type":"quantilesDoublesSketchToQuantile","name":"value","field":{"type":"fieldAccess","fieldName":"quantile_agg"},"fraction":0.95}`
 		postAggregation = godruid.PostAggRawJson(postAggregationString)
 		break

@@ -293,7 +298,7 @@ func SearchSpansAggregate(client *godruid.Client, queryParams *model.SpanSearchA
 		return nil, fmt.Errorf("Error in unmarshalling response from druid")
 	}

-	var response []SpanSearchAggregatesResponseItem
+	var response []model.SpanSearchAggregatesResponseItem

 	for _, elem := range *receivedResponse {

@@ -304,7 +309,7 @@ func SearchSpansAggregate(client *godruid.Client, queryParams *model.SpanSearchA
 		if queryParams.AggregationOption == "rate_per_sec" {
 			value = elem.Result.Value * 1.0 / float32(queryParams.StepSeconds)
 		}
-		response = append(response, SpanSearchAggregatesResponseItem{
+		response = append(response, model.SpanSearchAggregatesResponseItem{
 			Timestamp: timestamp,
 			Value:     value,
 		})
@@ -316,7 +321,7 @@ func SearchSpansAggregate(client *godruid.Client, queryParams *model.SpanSearchA
 	return nil, nil
 }

-func SearchSpans(client *godruid.Client, queryParams *model.SpanSearchParams) ([]godruid.ScanResult, error) {
+func SearchSpans(client *godruid.Client, queryParams *model.SpanSearchParams) (*[]model.SearchSpansResult, error) {

 	filter, err := buildFilters(queryParams)

@@ -347,7 +352,16 @@ func SearchSpans(client *godruid.Client, queryParams *model.SpanSearchParams) ([

 	// fmt.Printf("query.QueryResult:\n%v", query.QueryResult)

-	return query.QueryResult, nil
+	var searchSpansResult []model.SearchSpansResult
+	searchSpansResult = make([]model.SearchSpansResult, len(query.QueryResult))
+
+	searchSpansResult[0].Columns = make([]string, len(query.QueryResult[0].Columns))
+	copy(searchSpansResult[0].Columns, query.QueryResult[0].Columns)
+
+	searchSpansResult[0].Events = make([][]interface{}, len(query.QueryResult[0].Events))
+	copy(searchSpansResult[0].Events, query.QueryResult[0].Events)
+
+	return &searchSpansResult, nil
 }

 func GetApplicationPercentiles(client *godruid.Client, queryParams *model.ApplicationPercentileParams) ([]godruid.Timeseries, error) {
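The percentile cases in SearchSpansAggregate differ only in the fraction passed to quantilesDoublesSketchToQuantile (0.5, 0.95, 0.99). A hedged, behavior-equivalent alternative that collapses the switch into a lookup (assuming the p99 case follows the same pattern as the two visible ones):

	// Fraction per aggregation option; p99 assumed symmetric with the visible cases.
	fractions := map[string]float64{"p50": 0.5, "p95": 0.95, "p99": 0.99}

	if fraction, ok := fractions[queryParams.AggregationOption]; ok {
		aggregation = godruid.AggRawJson(`{ "type": "quantilesDoublesSketch", "fieldName": "QuantileDuration", "name": "quantile_agg", "k": 128}`)
		postAggregation = godruid.PostAggRawJson(fmt.Sprintf(
			`{"type":"quantilesDoublesSketchToQuantile","name":"value","field":{"type":"fieldAccess","fieldName":"quantile_agg"},"fraction":%g}`,
			fraction))
	}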
@@ -3,10 +3,14 @@ module go.signoz.io/query-service
 go 1.14

 require (
+	github.com/ClickHouse/clickhouse-go v1.4.5
 	github.com/gogo/protobuf v1.2.1
 	github.com/google/uuid v1.1.1
 	github.com/gorilla/handlers v1.5.1
 	github.com/gorilla/mux v1.8.0
 	github.com/jaegertracing/jaeger v1.21.0
+	github.com/jmoiron/sqlx v1.3.4
 	github.com/opentracing/opentracing-go v1.1.0
 	github.com/ory/viper v1.7.5
 	github.com/posthog/posthog-go v0.0.0-20200525173953-e46dc8e6b89b
 	github.com/rs/cors v1.7.0
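The two new entries in go.mod line up with the ClickHouse reader: clickhouse-go registers a database/sql driver named "clickhouse" (v1 API, tcp:// DSNs), and sqlx wraps it for struct scanning. A minimal connection sketch under those assumptions (the DSN is illustrative):

package main

import (
	"log"

	_ "github.com/ClickHouse/clickhouse-go" // registers the "clickhouse" driver (v1 API)
	"github.com/jmoiron/sqlx"
)

func main() {
	// Hypothetical DSN; clickhouse-go v1 uses tcp:// URLs with query options.
	db, err := sqlx.Open("clickhouse", "tcp://localhost:9000?debug=true")
	if err != nil {
		log.Fatal(err)
	}
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	log.Println("connected to ClickHouse")
}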
Some files were not shown because too many files have changed in this diff.