Compare commits
25 Commits
v0.48.0-cl
...
new-table-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3476f3032d | ||
|
|
4b757b382f | ||
|
|
8ff392bc96 | ||
|
|
b59d9c7b90 | ||
|
|
afcee9cd02 | ||
|
|
82a079e687 | ||
|
|
adfeaaa1f0 | ||
|
|
6ee9705599 | ||
|
|
67965c8e4d | ||
|
|
38b1de5ccc | ||
|
|
64e06ab3f9 | ||
|
|
f3c2fb0246 | ||
|
|
a4e98e565d | ||
|
|
faa1728b8c | ||
|
|
b69e97d7b0 | ||
|
|
c0195e9dc9 | ||
|
|
b69545a771 | ||
|
|
9a6db272c1 | ||
|
|
45d6430ab3 | ||
|
|
cf7bf32ac2 | ||
|
|
1695b4f06d | ||
|
|
a65d5095a0 | ||
|
|
0fade428ef | ||
|
|
3b4b9e43b3 | ||
|
|
c104b758ba |
@@ -34,6 +34,8 @@
|
||||
"@dnd-kit/core": "6.1.0",
|
||||
"@dnd-kit/modifiers": "7.0.0",
|
||||
"@dnd-kit/sortable": "8.0.0",
|
||||
"@dnd-kit/utilities": "3.2.2",
|
||||
"@faker-js/faker": "8.4.1",
|
||||
"@grafana/data": "^9.5.2",
|
||||
"@mdx-js/loader": "2.3.0",
|
||||
"@mdx-js/react": "2.3.0",
|
||||
@@ -43,6 +45,7 @@
|
||||
"@sentry/react": "7.102.1",
|
||||
"@sentry/webpack-plugin": "2.16.0",
|
||||
"@signozhq/design-tokens": "0.0.8",
|
||||
"@tanstack/react-table": "8.17.3",
|
||||
"@uiw/react-md-editor": "3.23.5",
|
||||
"@visx/group": "3.3.0",
|
||||
"@visx/shape": "3.5.0",
|
||||
|
||||
1
frontend/public/Logos/azure-aks.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="2.94" y1="3.74" x2="8.67" y2="3.74" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="b" x1="9.13" y1="3.79" x2="14.85" y2="3.79" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="c" x1=".01" y1="9.12" x2="5.73" y2="9.12" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="d" x1="6.18" y1="9.08" x2="11.9" y2="9.08" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="e" x1="12.35" y1="9.13" x2="18.08" y2="9.13" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="f" x1="2.87" y1="14.56" x2="8.6" y2="14.56" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient><linearGradient id="g" x1="9.05" y1="14.6" x2="14.78" y2="14.6" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#b77af4"/><stop offset="1" stop-color="#773adc"/></linearGradient></defs><path fill="url(#a)" d="M5.8 1.22l-2.86.53v3.9l2.86.61 2.87-1.15V2.2L5.8 1.22z"/><path d="M5.91 6.2l2.62-1.06A.2.2 0 008.65 5V2.36a.21.21 0 00-.13-.18l-2.65-.9h-.12l-2.6.48a.2.2 0 00-.15.18v3.53a.19.19 0 00.15.19l2.63.55a.32.32 0 00.13-.01z" fill="none"/><path d="M2.94 1.75v3.9l2.89.61v-5zm1.22 3.6l-.81-.16v-3l.81-.13zm1.26.23l-.93-.15V2l.93-.16z" fill="#341a6e"/><path fill="url(#b)" d="M11.99 1.27l-2.86.53v3.9l2.86.61 2.86-1.16v-2.9l-2.86-.98z"/><path d="M9.13 1.8v3.9l2.87.61v-5zm1.21 3.6l-.81-.16v-3l.81-.13zm1.26.23l-.93-.15V2.05l.93-.17z" fill="#341a6e"/><path fill="url(#c)" d="M2.87 
6.6l-2.86.53v3.9l2.86.61 2.87-1.15V7.58L2.87 6.6z"/><path d="M0 7.13V11l2.89.61v-5zm1.21 3.61l-.81-.17v-3l.81-.14zm1.27.26l-.93-.15V7.38l.93-.16z" fill="#341a6e"/><path fill="url(#d)" d="M9.04 6.56l-2.86.53v3.9l2.86.62 2.86-1.16V7.54l-2.86-.98z"/><path d="M6.18 7.09V11l2.88.61v-5zm1.21 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15V7.34l.93-.16z" fill="#341a6e"/><path fill="url(#e)" d="M15.21 6.61l-2.86.53v3.9l2.86.61 2.87-1.15V7.59l-2.87-.98z"/><path d="M12.35 7.14V11l2.89.61v-5zm1.22 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15V7.39l.93-.16z" fill="#341a6e"/><path fill="url(#f)" d="M5.73 12.04l-2.86.52v3.9l2.86.62 2.87-1.16v-2.9l-2.87-.98z"/><path d="M5.84 17l2.61-1a.18.18 0 00.12-.18v-2.6a.2.2 0 00-.13-.22l-2.64-.9a.17.17 0 00-.12 0l-2.6.47a.19.19 0 00-.16.19v3.54a.19.19 0 00.15.19L5.7 17a.23.23 0 00.14 0z" fill="none"/><path d="M2.87 12.56v3.9l2.89.62V12zm1.22 3.61L3.28 16v-3l.81-.14zm1.26.23l-.93-.15v-3.44l.93-.16z" fill="#341a6e"/><path fill="url(#g)" d="M11.91 12.08l-2.86.53v3.9l2.86.61 2.87-1.15v-2.91l-2.87-.98z"/><path d="M9.05 12.61v3.9l2.89.61v-5zm1.22 3.61l-.81-.17v-3l.81-.14zm1.26.22l-.93-.15v-3.43l.93-.16z" fill="#341a6e"/></svg>
|
||||
|
After Width: | Height: | Size: 3.1 KiB |
1
frontend/public/Logos/azure-app-service.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="b" x1="4.4" y1="11.48" x2="4.37" y2="7.53" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="c" x1="10.13" y1="15.45" x2="10.13" y2="11.9" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><linearGradient id="d" x1="14.18" y1="11.15" x2="14.18" y2="7.38" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#ccc"/><stop offset="1" stop-color="#fcfcfc"/></linearGradient><radialGradient id="a" cx="13428.81" cy="3518.86" r="56.67" gradientTransform="matrix(.15 0 0 .15 -2005.33 -518.83)" gradientUnits="userSpaceOnUse"><stop offset=".18" stop-color="#5ea0ef"/><stop offset="1" stop-color="#0078d4"/></radialGradient></defs><path d="M14.21 15.72A8.5 8.5 0 013.79 2.28l.09-.06a8.5 8.5 0 0110.33 13.5" fill="url(#a)"/><path d="M6.69 7.23a13 13 0 018.91-3.58 8.47 8.47 0 00-1.49-1.44 14.34 14.34 0 00-4.69 1.1 12.54 12.54 0 00-4.08 2.82 2.76 2.76 0 011.35 1.1zM2.48 10.65a17.86 17.86 0 00-.83 2.62 7.82 7.82 0 00.62.92c.18.23.35.44.55.65a17.94 17.94 0 011.08-3.47 2.76 2.76 0 01-1.42-.72z" fill="#fff" opacity=".6"/><path d="M3.46 6.11a12 12 0 01-.69-2.94 8.15 8.15 0 00-1.1 1.45A12.69 12.69 0 002.24 7a2.69 2.69 0 011.22-.89z" fill="#f2f2f2" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><path d="M8.36 13.67a1.77 1.77 0 01.54-1.27 11.88 11.88 0 01-2.53-1.86 2.74 2.74 0 01-1.49.83 13.1 13.1 0 001.45 1.28 12.12 12.12 0 002.05 1.25 1.79 1.79 0 01-.02-.23zM14.66 13.88a12 12 0 01-2.76-.32.41.41 0 010 .11 1.75 1.75 0 01-.51 1.24 13.69 13.69 0 003.42.24A8.21 8.21 0 0016 13.81a11.5 11.5 0 01-1.34.07z" fill="#f2f2f2" opacity=".55"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/><path d="M12.32 8.93a1.83 1.83 0 01.61-1 25.5 25.5 0 01-4.46-4.14 16.91 16.91 0 01-2-2.92 7.64 7.64 0 00-1.09.42 18.14 18.14 0 002.15 
3.18 26.44 26.44 0 004.79 4.46z" fill="#f2f2f2" opacity=".7"/><circle cx="14.18" cy="9.27" r="1.89" fill="url(#d)"/><path d="M17.35 10.54l-.35-.17-.3-.16h-.06l-.26-.21h-.07L16 9.8a1.76 1.76 0 01-.64.92c.12.08.25.15.38.22l.08.05.35.19.86.45a8.63 8.63 0 00.29-1.11z" fill="#f2f2f2" opacity=".55"/><circle cx="4.38" cy="8.68" r="2.73" fill="url(#b)"/><circle cx="10.13" cy="13.67" r="1.78" fill="url(#c)"/></svg>
|
||||
|
After Width: | Height: | Size: 2.3 KiB |
2
frontend/public/Logos/azure-blob-storage.svg
Normal file
|
After Width: | Height: | Size: 6.7 KiB |
1
frontend/public/Logos/azure-container-apps.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" viewBox="0 0 18 18"><defs><linearGradient id="b27f1ad0-7d11-4247-9da3-91bce6211f32" x1="8.798" y1="8.703" x2="14.683" y2="8.703" gradientUnits="userSpaceOnUse"><stop offset="0.001" stop-color="#773adc" /><stop offset="1" stop-color="#552f99" /></linearGradient><linearGradient id="b2f92112-4ca9-4b17-a019-c9f26c1a4a8f" x1="5.764" y1="3.777" x2="5.764" y2="13.78" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#a67af4" /><stop offset="0.999" stop-color="#773adc" /></linearGradient></defs><g id="b8a0486a-5501-4d92-b540-a766c4b3b548"><g><g><g><path d="M16.932,11.578a8.448,8.448,0,0,1-7.95,5.59,8.15,8.15,0,0,1-2.33-.33,2.133,2.133,0,0,0,.18-.83c.01,0,.03.01.04.01a7.422,7.422,0,0,0,2.11.3,7.646,7.646,0,0,0,6.85-4.28l.01-.01Z" fill="#32bedd" /><path d="M3.582,14.068a2.025,2.025,0,0,0-.64.56,8.6,8.6,0,0,1-1.67-2.44l1.04.23v.26a.6.6,0,0,0,.47.59l.14.03a6.136,6.136,0,0,0,.62.73Z" fill="#32bedd" /><path d="M12.352.958a2.28,2.28,0,0,0-.27.81c-.02-.01-.05-.02-.07-.03a7.479,7.479,0,0,0-3.03-.63,7.643,7.643,0,0,0-5.9,2.8l-.29.06a.6.6,0,0,0-.48.58v.46l-1.02.19A8.454,8.454,0,0,1,8.982.268,8.6,8.6,0,0,1,12.352.958Z" fill="#32bedd" /><path d="M16.872,5.7l-1.09-.38a6.6,6.6,0,0,0-.72-1.16c-.02-.03-.04-.05-.05-.07a2.083,2.083,0,0,0,.72-.45A7.81,7.81,0,0,1,16.872,5.7Z" fill="#32bedd" /><path d="M10.072,11.908l2.54.56L8.672,14.1c-.02,0-.03.01-.05.01a.154.154,0,0,1-.15-.15V3.448a.154.154,0,0,1,.15-.15.09.09,0,0,1,.05.01l4.46,1.56-3.05.57a.565.565,0,0,0-.44.54v5.4A.537.537,0,0,0,10.072,11.908Z" fill="#fff" /><g><g id="e918f286-5032-4942-ad29-ea17e6f1cc90"><path d="M1.1,5.668l1.21-.23v6.55l-1.23-.27-.99-.22a.111.111,0,0,1-.09-.12v-5.4a.12.12,0,0,1,.09-.12Z" fill="#a67af4" /></g><g><g id="a47a99dd-4d47-4c70-8c42-c5ac274ce496"><g><path 
d="M10.072,11.908l2.54.56L8.672,14.1c-.02,0-.03.01-.05.01a.154.154,0,0,1-.15-.15V3.448a.154.154,0,0,1,.15-.15.09.09,0,0,1,.05.01l4.46,1.56-3.05.57a.565.565,0,0,0-.44.54v5.4A.537.537,0,0,0,10.072,11.908Z" fill="url(#b27f1ad0-7d11-4247-9da3-91bce6211f32)" /><path d="M8.586,3.3,2.878,4.378a.177.177,0,0,0-.14.175V12.68a.177.177,0,0,0,.137.174L8.581,14.1a.176.176,0,0,0,.21-.174V3.478A.175.175,0,0,0,8.619,3.3Z" fill="url(#b2f92112-4ca9-4b17-a019-c9f26c1a4a8f)" /></g></g><polygon points="5.948 4.921 5.948 12.483 7.934 12.814 7.934 4.564 5.948 4.921" fill="#b796f9" opacity="0.5" /><polygon points="3.509 5.329 3.509 11.954 5.238 12.317 5.238 5.031 3.509 5.329" fill="#b796f9" opacity="0.5" /></g></g></g><path d="M16,2.048a1.755,1.755,0,1,1-1.76-1.76A1.756,1.756,0,0,1,16,2.048Z" fill="#32bedd" /><circle cx="4.65" cy="15.973" r="1.759" fill="#32bedd" /></g><path d="M18,6.689v3.844a.222.222,0,0,1-.133.2l-.766.316-3.07,1.268-.011,0a.126.126,0,0,1-.038,0,.1.1,0,0,1-.1-.1V5.234a.1.1,0,0,1,.054-.088l0,0,.019,0a.031.031,0,0,1,.019,0,.055.055,0,0,1,.034.008l.011,0,.012,0L17.05,6.2l.8.282A.213.213,0,0,1,18,6.689Z" fill="#773adc" /><path d="M13.959,5.14l-3.8.715a.118.118,0,0,0-.093.117v5.409a.118.118,0,0,0,.091.116l3.8.831a.115.115,0,0,0,.137-.09.109.109,0,0,0,0-.026V5.256a.117.117,0,0,0-.115-.118A.082.082,0,0,0,13.959,5.14Z" fill="#a67af4" /></g></g></svg>
|
||||
|
After Width: | Height: | Size: 3.1 KiB |
1
frontend/public/Logos/azure-functions.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="-175.993" y1="-343.723" x2="-175.993" y2="-359.232" gradientTransform="matrix(1.156 0 0 1.029 212.573 370.548)" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#fea11b"/><stop offset=".284" stop-color="#fea51a"/><stop offset=".547" stop-color="#feb018"/><stop offset=".8" stop-color="#ffc314"/><stop offset="1" stop-color="#ffd70f"/></linearGradient></defs><path d="M5.54 13.105l-.586.588a.267.267 0 01-.377 0L.223 9.353a.533.533 0 010-.755l.588-.59 4.732 4.718a.267.267 0 010 .378z" fill="#50e6ff"/><path d="M4.863 4.305l.59.588a.267.267 0 010 .378L.806 9.932l-.59-.589a.533.533 0 01-.001-.754l4.273-4.285a.267.267 0 01.376 0z" fill="#1490df"/><path d="M17.19 8.012l.588.59a.533.533 0 01-.001.754l-4.354 4.34a.267.267 0 01-.377 0l-.586-.587a.267.267 0 010-.377l4.732-4.718z" fill="#50e6ff"/><path d="M17.782 9.34l-.59.589-4.648-4.662a.267.267 0 010-.377l.59-.588a.267.267 0 01.378 0l4.273 4.286a.533.533 0 010 .753z" fill="#1490df"/><path d="M8.459 9.9H4.87a.193.193 0 01-.2-.181.166.166 0 01.018-.075L8.991 1.13a.206.206 0 01.186-.106h4.245a.193.193 0 01.2.181.165.165 0 01-.035.1L8.534 7.966h4.928a.193.193 0 01.2.181.176.176 0 01-.052.122l-8.189 8.519c-.077.046-.624.5-.356-.189z" fill="url(#a)"/></svg>
|
||||
|
After Width: | Height: | Size: 1.3 KiB |
1
frontend/public/Logos/azure-sql-database-metrics.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><radialGradient id="b" cx="9.36" cy="10.57" r="7.07" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#f2f2f2"/><stop offset=".58" stop-color="#eee"/><stop offset="1" stop-color="#e6e6e6"/></radialGradient><linearGradient id="a" x1="2.59" y1="10.16" x2="15.41" y2="10.16" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#005ba1"/><stop offset=".07" stop-color="#0060a9"/><stop offset=".36" stop-color="#0071c8"/><stop offset=".52" stop-color="#0078d4"/><stop offset=".64" stop-color="#0074cd"/><stop offset=".82" stop-color="#006abb"/><stop offset="1" stop-color="#005ba1"/></linearGradient></defs><path d="M9 5.14c-3.54 0-6.41-1-6.41-2.32v12.36c0 1.27 2.82 2.3 6.32 2.32H9c3.54 0 6.41-1 6.41-2.32V2.82c0 1.29-2.87 2.32-6.41 2.32z" fill="url(#a)"/><path d="M15.41 2.82c0 1.29-2.87 2.32-6.41 2.32s-6.41-1-6.41-2.32S5.46.5 9 .5s6.41 1 6.41 2.32" fill="#e8e8e8"/><path d="M13.92 2.63c0 .82-2.21 1.48-4.92 1.48s-4.92-.66-4.92-1.48S6.29 1.16 9 1.16s4.92.66 4.92 1.47" fill="#50e6ff"/><path d="M9 3a11.55 11.55 0 00-3.89.57A11.42 11.42 0 009 4.11a11.15 11.15 0 003.89-.58A11.84 11.84 0 009 3z" fill="#198ab3"/><path d="M12.9 11.4V8H12v4.13h2.46v-.73zM5.76 9.73a1.83 1.83 0 01-.51-.31.44.44 0 01-.12-.32.34.34 0 01.15-.3.68.68 0 01.42-.12 1.62 1.62 0 011 .29v-.86a2.58 2.58 0 00-1-.16 1.64 1.64 0 00-1.09.34 1.08 1.08 0 00-.42.89c0 .51.32.91 1 1.21a2.88 2.88 0 01.62.36.42.42 0 01.15.32.38.38 0 01-.16.31.81.81 0 01-.45.11 1.66 1.66 0 01-1.09-.42V12a2.17 2.17 0 001.07.24 1.88 1.88 0 001.18-.33 1.08 1.08 0 00.33-.91 1.05 1.05 0 00-.25-.7 2.42 2.42 0 00-.83-.57zM11 11.32a2.34 2.34 0 00.33-1.26A2.32 2.32 0 0011 9a1.81 1.81 0 00-.7-.75 2 2 0 00-1-.26 2.11 2.11 0 00-1.08.27 1.86 1.86 0 00-.73.74 2.46 2.46 0 00-.26 1.14 2.26 2.26 0 00.24 1 1.76 1.76 0 00.69.74 2.06 2.06 0 001 .3l.86 1h1.21L10 12.08a1.79 1.79 0 001-.76zm-1-.25a.94.94 0 01-.76.35.92.92 0 01-.76-.36 1.52 1.52 0 01-.29-1 1.53 1.53 0 01.29-1 1 1 0 
01.78-.37.87.87 0 01.75.37 1.62 1.62 0 01.27 1 1.46 1.46 0 01-.28 1.01z" fill="url(#b)"/></svg>
|
||||
|
After Width: | Height: | Size: 2.0 KiB |
1
frontend/public/Logos/azure-vm.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 18 18"><defs><linearGradient id="a" x1="8.88" y1="12.21" x2="8.88" y2=".21" gradientUnits="userSpaceOnUse"><stop offset="0" stop-color="#0078d4"/><stop offset=".82" stop-color="#5ea0ef"/></linearGradient><linearGradient id="b" x1="8.88" y1="16.84" x2="8.88" y2="12.21" gradientUnits="userSpaceOnUse"><stop offset=".15" stop-color="#ccc"/><stop offset="1" stop-color="#707070"/></linearGradient></defs><rect x="-.12" y=".21" width="18" height="12" rx=".6" fill="url(#a)"/><path fill="#50e6ff" d="M11.88 4.46v3.49l-3 1.76v-3.5l3-1.75z"/><path fill="#c3f1ff" d="M11.88 4.46l-3 1.76-3-1.76 3-1.75 3 1.75z"/><path fill="#9cebff" d="M8.88 6.22v3.49l-3-1.76V4.46l3 1.76z"/><path fill="#c3f1ff" d="M5.88 7.95l3-1.74v3.5l-3-1.76z"/><path fill="#9cebff" d="M11.88 7.95l-3-1.74v3.5l3-1.76z"/><path d="M12.49 15.84c-1.78-.28-1.85-1.56-1.85-3.63H7.11c0 2.07-.06 3.35-1.84 3.63a1 1 0 00-.89 1h9a1 1 0 00-.89-1z" fill="url(#b)"/></svg>
|
||||
|
After Width: | Height: | Size: 973 B |
@@ -8,6 +8,7 @@
|
||||
"GET_STARTED_LOGS_MANAGEMENT": "SigNoz | Get Started | Logs",
|
||||
"GET_STARTED_INFRASTRUCTURE_MONITORING": "SigNoz | Get Started | Infrastructure",
|
||||
"GET_STARTED_AWS_MONITORING": "SigNoz | Get Started | AWS",
|
||||
"GET_STARTED_AZURE_MONITORING": "SigNoz | Get Started | AZURE",
|
||||
"TRACE": "SigNoz | Trace",
|
||||
"TRACE_DETAIL": "SigNoz | Trace Detail",
|
||||
"TRACES_EXPLORER": "SigNoz | Traces Explorer",
|
||||
|
||||
@@ -287,7 +287,7 @@ function CustomTimePicker({
|
||||
)
|
||||
}
|
||||
arrow={false}
|
||||
trigger="hover"
|
||||
trigger="click"
|
||||
open={open}
|
||||
onOpenChange={handleOpenChange}
|
||||
style={{
|
||||
|
||||
@@ -3,6 +3,7 @@ import './DropDown.styles.scss';
|
||||
import { EllipsisOutlined } from '@ant-design/icons';
|
||||
import { Button, Dropdown, MenuProps } from 'antd';
|
||||
import { useIsDarkMode } from 'hooks/useDarkMode';
|
||||
import { useState } from 'react';
|
||||
|
||||
function DropDown({ element }: { element: JSX.Element[] }): JSX.Element {
|
||||
const isDarkMode = useIsDarkMode();
|
||||
@@ -14,12 +15,24 @@ function DropDown({ element }: { element: JSX.Element[] }): JSX.Element {
|
||||
}),
|
||||
);
|
||||
|
||||
const [isDdOpen, setDdOpen] = useState<boolean>(false);
|
||||
|
||||
return (
|
||||
<Dropdown menu={{ items }}>
|
||||
<Dropdown
|
||||
menu={{
|
||||
items,
|
||||
onMouseEnter: (): void => setDdOpen(true),
|
||||
onMouseLeave: (): void => setDdOpen(false),
|
||||
}}
|
||||
open={isDdOpen}
|
||||
>
|
||||
<Button
|
||||
type="link"
|
||||
className={!isDarkMode ? 'dropdown-button--dark' : 'dropdown-button'}
|
||||
onClick={(e): void => e.preventDefault()}
|
||||
onClick={(e): void => {
|
||||
e.preventDefault();
|
||||
setDdOpen(true);
|
||||
}}
|
||||
>
|
||||
<EllipsisOutlined className="dropdown-icon" />
|
||||
</Button>
|
||||
|
||||
@@ -2,7 +2,7 @@ import './AddToQueryHOC.styles.scss';
|
||||
|
||||
import { Popover } from 'antd';
|
||||
import { OPERATORS } from 'constants/queryBuilder';
|
||||
import { memo, ReactNode, useCallback, useMemo } from 'react';
|
||||
import { memo, MouseEvent, ReactNode, useMemo } from 'react';
|
||||
|
||||
function AddToQueryHOC({
|
||||
fieldKey,
|
||||
@@ -10,9 +10,10 @@ function AddToQueryHOC({
|
||||
onAddToQuery,
|
||||
children,
|
||||
}: AddToQueryHOCProps): JSX.Element {
|
||||
const handleQueryAdd = useCallback(() => {
|
||||
const handleQueryAdd = (event: MouseEvent<HTMLDivElement>): void => {
|
||||
event.stopPropagation();
|
||||
onAddToQuery(fieldKey, fieldValue, OPERATORS.IN);
|
||||
}, [fieldKey, fieldValue, onAddToQuery]);
|
||||
};
|
||||
|
||||
const popOverContent = useMemo(() => <span>Add to query: {fieldKey}</span>, [
|
||||
fieldKey,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
/* eslint-disable sonarjs/cognitive-complexity */
|
||||
import './Uplot.styles.scss';
|
||||
|
||||
import * as Sentry from '@sentry/react';
|
||||
import { Typography } from 'antd';
|
||||
import { ToggleGraphProps } from 'components/Graph/types';
|
||||
import { LineChart } from 'lucide-react';
|
||||
@@ -13,7 +14,6 @@ import {
|
||||
useImperativeHandle,
|
||||
useRef,
|
||||
} from 'react';
|
||||
import { ErrorBoundary } from 'react-error-boundary';
|
||||
import UPlot from 'uplot';
|
||||
|
||||
import { dataMatch, optionsUpdateState } from './utils';
|
||||
@@ -139,7 +139,7 @@ const Uplot = forwardRef<ToggleGraphProps | undefined, UplotProps>(
|
||||
}
|
||||
|
||||
return (
|
||||
<ErrorBoundary FallbackComponent={ErrorBoundaryFallback}>
|
||||
<Sentry.ErrorBoundary fallback={<ErrorBoundaryFallback />}>
|
||||
<div className="uplot-graph-container" ref={targetRef}>
|
||||
{data && data[0] && data[0]?.length === 0 ? (
|
||||
<div className="not-found">
|
||||
@@ -147,7 +147,7 @@ const Uplot = forwardRef<ToggleGraphProps | undefined, UplotProps>(
|
||||
</div>
|
||||
) : null}
|
||||
</div>
|
||||
</ErrorBoundary>
|
||||
</Sentry.ErrorBoundary>
|
||||
);
|
||||
},
|
||||
);
|
||||
|
||||
@@ -40,4 +40,5 @@ export const getComponentForPanelType = (
|
||||
export const AVAILABLE_EXPORT_PANEL_TYPES = [
|
||||
PANEL_TYPES.TIME_SERIES,
|
||||
PANEL_TYPES.TABLE,
|
||||
PANEL_TYPES.LIST,
|
||||
];
|
||||
|
||||
@@ -13,6 +13,7 @@ const ROUTES = {
|
||||
GET_STARTED_INFRASTRUCTURE_MONITORING:
|
||||
'/get-started/infrastructure-monitoring',
|
||||
GET_STARTED_AWS_MONITORING: '/get-started/aws-monitoring',
|
||||
GET_STARTED_AZURE_MONITORING: '/get-started/azure-monitoring',
|
||||
USAGE_EXPLORER: '/usage-explorer',
|
||||
APPLICATION: '/services',
|
||||
ALL_DASHBOARD: '/dashboard',
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
/* eslint-disable jsx-a11y/anchor-is-valid */
|
||||
import './AppLayout.styles.scss';
|
||||
|
||||
import * as Sentry from '@sentry/react';
|
||||
import { Flex } from 'antd';
|
||||
import getLocalStorageKey from 'api/browser/localstorage/get';
|
||||
import getDynamicConfigs from 'api/dynamicConfigs/getDynamicConfigs';
|
||||
@@ -27,7 +28,6 @@ import {
|
||||
useRef,
|
||||
useState,
|
||||
} from 'react';
|
||||
import { ErrorBoundary } from 'react-error-boundary';
|
||||
import { Helmet } from 'react-helmet-async';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { useQueries } from 'react-query';
|
||||
@@ -236,7 +236,8 @@ function AppLayout(props: AppLayoutProps): JSX.Element {
|
||||
pathname === ROUTES.GET_STARTED_APPLICATION_MONITORING ||
|
||||
pathname === ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING ||
|
||||
pathname === ROUTES.GET_STARTED_LOGS_MANAGEMENT ||
|
||||
pathname === ROUTES.GET_STARTED_AWS_MONITORING;
|
||||
pathname === ROUTES.GET_STARTED_AWS_MONITORING ||
|
||||
pathname === ROUTES.GET_STARTED_AZURE_MONITORING;
|
||||
|
||||
const [showTrialExpiryBanner, setShowTrialExpiryBanner] = useState(false);
|
||||
|
||||
@@ -342,7 +343,7 @@ function AppLayout(props: AppLayoutProps): JSX.Element {
|
||||
/>
|
||||
)}
|
||||
<div className={cx('app-content', collapsed ? 'collapsed' : '')}>
|
||||
<ErrorBoundary FallbackComponent={ErrorBoundaryFallback}>
|
||||
<Sentry.ErrorBoundary fallback={<ErrorBoundaryFallback />}>
|
||||
<LayoutContent>
|
||||
<ChildrenContainer
|
||||
style={{
|
||||
@@ -360,7 +361,7 @@ function AppLayout(props: AppLayoutProps): JSX.Element {
|
||||
{children}
|
||||
</ChildrenContainer>
|
||||
</LayoutContent>
|
||||
</ErrorBoundary>
|
||||
</Sentry.ErrorBoundary>
|
||||
</div>
|
||||
</Flex>
|
||||
</Layout>
|
||||
|
||||
@@ -77,7 +77,8 @@ function FormAlertRules({
|
||||
|
||||
const urlQuery = useUrlQuery();
|
||||
|
||||
const panelType = urlQuery.get(QueryParams.panelTypes) as PANEL_TYPES | null;
|
||||
// In case of alert the panel types should always be "Graph" only
|
||||
const panelType = PANEL_TYPES.TIME_SERIES;
|
||||
|
||||
const {
|
||||
currentQuery,
|
||||
|
||||
@@ -108,6 +108,7 @@ function GridCardGraph({
|
||||
query: updatedQuery,
|
||||
globalSelectedInterval,
|
||||
variables: getDashboardVariables(variables),
|
||||
fillGaps: widget.fillSpans,
|
||||
};
|
||||
}
|
||||
updatedQuery.builder.queryData[0].pageSize = 10;
|
||||
@@ -122,6 +123,7 @@ function GridCardGraph({
|
||||
limit: updatedQuery.builder.queryData[0].limit || 0,
|
||||
},
|
||||
},
|
||||
fillGaps: widget.fillSpans,
|
||||
};
|
||||
});
|
||||
|
||||
@@ -152,6 +154,7 @@ function GridCardGraph({
|
||||
widget?.query,
|
||||
widget?.panelTypes,
|
||||
widget.timePreferance,
|
||||
widget.fillSpans,
|
||||
requestData,
|
||||
],
|
||||
retry(failureCount, error): boolean {
|
||||
|
||||
@@ -55,6 +55,9 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
|
||||
role,
|
||||
);
|
||||
|
||||
const [editLoader, setEditLoader] = useState<boolean>(false);
|
||||
const [cloneLoader, setCloneLoader] = useState<boolean>(false);
|
||||
|
||||
const params = useUrlQuery();
|
||||
const orderColumnParam = params.get('columnKey');
|
||||
const orderQueryParam = params.get('order');
|
||||
@@ -113,6 +116,7 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
|
||||
}, [featureResponse, handleError]);
|
||||
|
||||
const onEditHandler = (record: GettableAlert) => (): void => {
|
||||
setEditLoader(true);
|
||||
featureResponse
|
||||
.refetch()
|
||||
.then(() => {
|
||||
@@ -129,9 +133,11 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
|
||||
|
||||
params.set(QueryParams.ruleId, record.id.toString());
|
||||
|
||||
setEditLoader(false);
|
||||
history.push(`${ROUTES.EDIT_ALERTS}?${params.toString()}`);
|
||||
})
|
||||
.catch(handleError);
|
||||
.catch(handleError)
|
||||
.finally(() => setEditLoader(false));
|
||||
};
|
||||
|
||||
const onCloneHandler = (
|
||||
@@ -143,33 +149,41 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
|
||||
};
|
||||
const apiReq = { data: copyAlert };
|
||||
|
||||
const response = await saveAlertApi(apiReq);
|
||||
try {
|
||||
setCloneLoader(true);
|
||||
const response = await saveAlertApi(apiReq);
|
||||
|
||||
if (response.statusCode === 200) {
|
||||
notificationsApi.success({
|
||||
message: 'Success',
|
||||
description: 'Alert cloned successfully',
|
||||
});
|
||||
if (response.statusCode === 200) {
|
||||
notificationsApi.success({
|
||||
message: 'Success',
|
||||
description: 'Alert cloned successfully',
|
||||
});
|
||||
|
||||
const { data: refetchData, status } = await refetch();
|
||||
if (status === 'success' && refetchData.payload) {
|
||||
setData(refetchData.payload || []);
|
||||
setTimeout(() => {
|
||||
const clonedAlert = refetchData.payload[refetchData.payload.length - 1];
|
||||
params.set(QueryParams.ruleId, String(clonedAlert.id));
|
||||
history.push(`${ROUTES.EDIT_ALERTS}?${params.toString()}`);
|
||||
}, 2000);
|
||||
}
|
||||
if (status === 'error') {
|
||||
const { data: refetchData, status } = await refetch();
|
||||
if (status === 'success' && refetchData.payload) {
|
||||
setData(refetchData.payload || []);
|
||||
setTimeout(() => {
|
||||
const clonedAlert = refetchData.payload[refetchData.payload.length - 1];
|
||||
params.set(QueryParams.ruleId, String(clonedAlert.id));
|
||||
history.push(`${ROUTES.EDIT_ALERTS}?${params.toString()}`);
|
||||
}, 2000);
|
||||
}
|
||||
if (status === 'error') {
|
||||
notificationsApi.error({
|
||||
message: t('something_went_wrong'),
|
||||
});
|
||||
}
|
||||
} else {
|
||||
notificationsApi.error({
|
||||
message: t('something_went_wrong'),
|
||||
message: 'Error',
|
||||
description: response.error || t('something_went_wrong'),
|
||||
});
|
||||
}
|
||||
} else {
|
||||
notificationsApi.error({
|
||||
message: 'Error',
|
||||
description: response.error || t('something_went_wrong'),
|
||||
});
|
||||
} catch (error) {
|
||||
handleError();
|
||||
console.error(error);
|
||||
} finally {
|
||||
setCloneLoader(false);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -314,10 +328,20 @@ function ListAlert({ allAlertRules, refetch }: ListAlertProps): JSX.Element {
|
||||
setData={setData}
|
||||
id={id}
|
||||
/>,
|
||||
<ColumnButton key="2" onClick={onEditHandler(record)} type="link">
|
||||
<ColumnButton
|
||||
key="2"
|
||||
onClick={onEditHandler(record)}
|
||||
type="link"
|
||||
loading={editLoader}
|
||||
>
|
||||
Edit
|
||||
</ColumnButton>,
|
||||
<ColumnButton key="3" onClick={onCloneHandler(record)} type="link">
|
||||
<ColumnButton
|
||||
key="3"
|
||||
onClick={onCloneHandler(record)}
|
||||
type="link"
|
||||
loading={cloneLoader}
|
||||
>
|
||||
Clone
|
||||
</ColumnButton>,
|
||||
<DeleteAlert
|
||||
|
||||
@@ -27,5 +27,8 @@ export const ColumnButton = styled(ButtonComponent)`
|
||||
padding-left: 0;
|
||||
padding-right: 0;
|
||||
margin-right: 1.5em;
|
||||
width: 100%;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
`;
|
||||
|
||||
@@ -699,7 +699,16 @@ function DashboardsList(): JSX.Element {
|
||||
New Dashboard
|
||||
</Button>
|
||||
</Dropdown>
|
||||
<Button type="text" className="learn-more">
|
||||
<Button
|
||||
type="text"
|
||||
className="learn-more"
|
||||
onClick={(): void => {
|
||||
window.open(
|
||||
'https://signoz.io/docs/userguide/manage-dashboards?utm_source=product&utm_medium=dashboard-list-empty-state',
|
||||
'_blank',
|
||||
);
|
||||
}}
|
||||
>
|
||||
Learn more
|
||||
</Button>
|
||||
<ArrowUpRight size={16} className="learn-more-arrow" />
|
||||
|
||||
@@ -16,6 +16,7 @@ import { useOptionsMenu } from 'container/OptionsMenu';
|
||||
import { useActiveLog } from 'hooks/logs/useActiveLog';
|
||||
import { useCopyLogLink } from 'hooks/logs/useCopyLogLink';
|
||||
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
|
||||
import PeriscopeTable from 'periscope/components/Table/Table';
|
||||
import { memo, useCallback, useMemo, useRef } from 'react';
|
||||
import { Virtuoso, VirtuosoHandle } from 'react-virtuoso';
|
||||
// interfaces
|
||||
@@ -157,6 +158,7 @@ function LogsExplorerList({
|
||||
|
||||
return (
|
||||
<div className="logs-list-view-container">
|
||||
<PeriscopeTable />
|
||||
{(isLoading || (isFetching && logs.length === 0)) && <LogsLoading />}
|
||||
|
||||
{!isLoading &&
|
||||
|
||||
@@ -37,7 +37,7 @@ import { useNotifications } from 'hooks/useNotifications';
|
||||
import useUrlQueryData from 'hooks/useUrlQueryData';
|
||||
import { FlatLogData } from 'lib/logs/flatLogData';
|
||||
import { getPaginationQueryData } from 'lib/newQueryBuilder/getPaginationQueryData';
|
||||
import { defaultTo, isEmpty, omit } from 'lodash-es';
|
||||
import { cloneDeep, defaultTo, isEmpty, omit, set } from 'lodash-es';
|
||||
import { Sliders } from 'lucide-react';
|
||||
import { SELECTED_VIEWS } from 'pages/LogsExplorer/utils';
|
||||
import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react';
|
||||
@@ -117,6 +117,12 @@ function LogsExplorerViews({
|
||||
return stagedQuery.builder.queryData.find((item) => !item.disabled) || null;
|
||||
}, [stagedQuery]);
|
||||
|
||||
const { options, config } = useOptionsMenu({
|
||||
storageKey: LOCALSTORAGE.LOGS_LIST_OPTIONS,
|
||||
dataSource: initialDataSource || DataSource.LOGS,
|
||||
aggregateOperator: listQuery?.aggregateOperator || StringOperators.NOOP,
|
||||
});
|
||||
|
||||
const orderByTimestamp: OrderByPayload | null = useMemo(() => {
|
||||
const timestampOrderBy = listQuery?.orderBy.find(
|
||||
(item) => item.columnName === 'timestamp',
|
||||
@@ -174,10 +180,10 @@ function LogsExplorerViews({
|
||||
() =>
|
||||
updateAllQueriesOperators(
|
||||
currentQuery || initialQueriesMap.logs,
|
||||
PANEL_TYPES.TIME_SERIES,
|
||||
selectedPanelType,
|
||||
DataSource.LOGS,
|
||||
),
|
||||
[currentQuery, updateAllQueriesOperators],
|
||||
[currentQuery, selectedPanelType, updateAllQueriesOperators],
|
||||
);
|
||||
|
||||
const handleModeChange = (panelType: PANEL_TYPES): void => {
|
||||
@@ -309,6 +315,14 @@ function LogsExplorerViews({
|
||||
isLoading: isUpdateDashboardLoading,
|
||||
} = useUpdateDashboard();
|
||||
|
||||
const getUpdatedQueryForExport = useCallback((): Query => {
|
||||
const updatedQuery = cloneDeep(currentQuery);
|
||||
|
||||
set(updatedQuery, 'builder.queryData[0].pageSize', 10);
|
||||
|
||||
return updatedQuery;
|
||||
}, [currentQuery]);
|
||||
|
||||
const handleExport = useCallback(
|
||||
(dashboard: Dashboard | null): void => {
|
||||
if (!dashboard || !panelType) return;
|
||||
@@ -319,11 +333,17 @@ function LogsExplorerViews({
|
||||
|
||||
const widgetId = v4();
|
||||
|
||||
const query =
|
||||
panelType === PANEL_TYPES.LIST
|
||||
? getUpdatedQueryForExport()
|
||||
: exportDefaultQuery;
|
||||
|
||||
const updatedDashboard = addEmptyWidgetInDashboardJSONWithQuery(
|
||||
dashboard,
|
||||
exportDefaultQuery,
|
||||
query,
|
||||
widgetId,
|
||||
panelTypeParam,
|
||||
options.selectColumns,
|
||||
);
|
||||
|
||||
updateDashboard(updatedDashboard, {
|
||||
@@ -353,7 +373,7 @@ function LogsExplorerViews({
|
||||
}
|
||||
|
||||
const dashboardEditView = generateExportToDashboardLink({
|
||||
query: exportDefaultQuery,
|
||||
query,
|
||||
panelType: panelTypeParam,
|
||||
dashboardId: data.payload?.uuid || '',
|
||||
widgetId,
|
||||
@@ -365,7 +385,9 @@ function LogsExplorerViews({
|
||||
});
|
||||
},
|
||||
[
|
||||
getUpdatedQueryForExport,
|
||||
exportDefaultQuery,
|
||||
options.selectColumns,
|
||||
history,
|
||||
notifications,
|
||||
panelType,
|
||||
@@ -460,12 +482,6 @@ function LogsExplorerViews({
|
||||
selectedView,
|
||||
]);
|
||||
|
||||
const { options, config } = useOptionsMenu({
|
||||
storageKey: LOCALSTORAGE.LOGS_LIST_OPTIONS,
|
||||
dataSource: initialDataSource || DataSource.METRICS,
|
||||
aggregateOperator: listQuery?.aggregateOperator || StringOperators.NOOP,
|
||||
});
|
||||
|
||||
const chartData = useMemo(() => {
|
||||
if (!stagedQuery) return [];
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
height: 100%;
|
||||
|
||||
.resize-table {
|
||||
height: calc(100% - 40px);
|
||||
height: calc(100% - 70px);
|
||||
overflow: scroll;
|
||||
overflow-x: hidden;
|
||||
|
||||
|
||||
@@ -40,12 +40,46 @@
|
||||
}
|
||||
|
||||
.variable-select {
|
||||
.ant-select-dropdown {
|
||||
max-width: 300px;
|
||||
.ant-select-item {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.all-label {
|
||||
display: flex;
|
||||
gap: 16px;
|
||||
}
|
||||
|
||||
.dropdown-checkbox-label {
|
||||
display: grid;
|
||||
grid-template-columns: 24px 1fr;
|
||||
}
|
||||
|
||||
.dropdown-value {
|
||||
display: flex;
|
||||
justify-content: space-between;
|
||||
align-items: center;
|
||||
|
||||
.option-text {
|
||||
max-width: 180px;
|
||||
padding: 0 8px;
|
||||
}
|
||||
|
||||
.toggle-tag-label {
|
||||
padding-left: 8px;
|
||||
right: 40px;
|
||||
font-weight: normal;
|
||||
position: absolute;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.dropdown-styles {
|
||||
min-width: 300px;
|
||||
max-width: 350px;
|
||||
}
|
||||
|
||||
.lightMode {
|
||||
.variable-item {
|
||||
.variable-name {
|
||||
|
||||
@@ -138,6 +138,7 @@ function DashboardVariableSelection(): JSX.Element | null {
|
||||
}}
|
||||
onValueUpdate={onValueUpdate}
|
||||
variablesToGetUpdated={variablesToGetUpdated}
|
||||
setVariablesToGetUpdated={setVariablesToGetUpdated}
|
||||
/>
|
||||
))}
|
||||
</Row>
|
||||
|
||||
@@ -54,6 +54,7 @@ describe('VariableItem', () => {
|
||||
existingVariables={{}}
|
||||
onValueUpdate={mockOnValueUpdate}
|
||||
variablesToGetUpdated={[]}
|
||||
setVariablesToGetUpdated={(): void => {}}
|
||||
/>
|
||||
</MockQueryClientProvider>,
|
||||
);
|
||||
@@ -69,6 +70,7 @@ describe('VariableItem', () => {
|
||||
existingVariables={{}}
|
||||
onValueUpdate={mockOnValueUpdate}
|
||||
variablesToGetUpdated={[]}
|
||||
setVariablesToGetUpdated={(): void => {}}
|
||||
/>
|
||||
</MockQueryClientProvider>,
|
||||
);
|
||||
@@ -83,6 +85,7 @@ describe('VariableItem', () => {
|
||||
existingVariables={{}}
|
||||
onValueUpdate={mockOnValueUpdate}
|
||||
variablesToGetUpdated={[]}
|
||||
setVariablesToGetUpdated={(): void => {}}
|
||||
/>
|
||||
</MockQueryClientProvider>,
|
||||
);
|
||||
@@ -111,6 +114,7 @@ describe('VariableItem', () => {
|
||||
existingVariables={{}}
|
||||
onValueUpdate={mockOnValueUpdate}
|
||||
variablesToGetUpdated={[]}
|
||||
setVariablesToGetUpdated={(): void => {}}
|
||||
/>
|
||||
</MockQueryClientProvider>,
|
||||
);
|
||||
@@ -123,6 +127,8 @@ describe('VariableItem', () => {
|
||||
const customVariableData = {
|
||||
...mockCustomVariableData,
|
||||
allSelected: true,
|
||||
showALLOption: true,
|
||||
multiSelect: true,
|
||||
};
|
||||
|
||||
render(
|
||||
@@ -132,6 +138,7 @@ describe('VariableItem', () => {
|
||||
existingVariables={{}}
|
||||
onValueUpdate={mockOnValueUpdate}
|
||||
variablesToGetUpdated={[]}
|
||||
setVariablesToGetUpdated={(): void => {}}
|
||||
/>
|
||||
</MockQueryClientProvider>,
|
||||
);
|
||||
@@ -147,6 +154,7 @@ describe('VariableItem', () => {
|
||||
existingVariables={{}}
|
||||
onValueUpdate={mockOnValueUpdate}
|
||||
variablesToGetUpdated={[]}
|
||||
setVariablesToGetUpdated={(): void => {}}
|
||||
/>
|
||||
</MockQueryClientProvider>,
|
||||
);
|
||||
|
||||
@@ -1,15 +1,29 @@
|
||||
/* eslint-disable jsx-a11y/click-events-have-key-events */
|
||||
/* eslint-disable jsx-a11y/no-static-element-interactions */
|
||||
/* eslint-disable @typescript-eslint/no-explicit-any */
|
||||
/* eslint-disable react/jsx-props-no-spreading */
|
||||
/* eslint-disable no-nested-ternary */
|
||||
import './DashboardVariableSelection.styles.scss';
|
||||
|
||||
import { orange } from '@ant-design/colors';
|
||||
import { WarningOutlined } from '@ant-design/icons';
|
||||
import { Input, Popover, Select, Typography } from 'antd';
|
||||
import {
|
||||
Checkbox,
|
||||
Input,
|
||||
Popover,
|
||||
Select,
|
||||
Tag,
|
||||
Tooltip,
|
||||
Typography,
|
||||
} from 'antd';
|
||||
import { CheckboxChangeEvent } from 'antd/es/checkbox';
|
||||
import dashboardVariablesQuery from 'api/dashboard/variables/dashboardVariablesQuery';
|
||||
import { REACT_QUERY_KEY } from 'constants/reactQueryKeys';
|
||||
import { commaValuesParser } from 'lib/dashbaordVariables/customCommaValuesParser';
|
||||
import sortValues from 'lib/dashbaordVariables/sortVariableValues';
|
||||
import { debounce, isArray, isString } from 'lodash-es';
|
||||
import map from 'lodash-es/map';
|
||||
import { memo, useEffect, useMemo, useState } from 'react';
|
||||
import { ChangeEvent, memo, useEffect, useMemo, useState } from 'react';
|
||||
import { useQuery } from 'react-query';
|
||||
import { IDashboardVariable } from 'types/api/dashboard/getAll';
|
||||
import { VariableResponseProps } from 'types/api/dashboard/variables/query';
|
||||
@@ -23,6 +37,11 @@ const ALL_SELECT_VALUE = '__ALL__';
|
||||
|
||||
const variableRegexPattern = /\{\{\s*?\.([^\s}]+)\s*?\}\}/g;
|
||||
|
||||
enum ToggleTagValue {
|
||||
Only = 'Only',
|
||||
All = 'All',
|
||||
}
|
||||
|
||||
interface VariableItemProps {
|
||||
variableData: IDashboardVariable;
|
||||
existingVariables: Record<string, IDashboardVariable>;
|
||||
@@ -33,12 +52,17 @@ interface VariableItemProps {
|
||||
allSelected: boolean,
|
||||
) => void;
|
||||
variablesToGetUpdated: string[];
|
||||
setVariablesToGetUpdated: React.Dispatch<React.SetStateAction<string[]>>;
|
||||
}
|
||||
|
||||
const getSelectValue = (
|
||||
selectedValue: IDashboardVariable['selectedValue'],
|
||||
variableData: IDashboardVariable,
|
||||
): string | string[] => {
|
||||
if (Array.isArray(selectedValue)) {
|
||||
if (!variableData.multiSelect && selectedValue.length === 1) {
|
||||
return selectedValue[0]?.toString() || '';
|
||||
}
|
||||
return selectedValue.map((item) => item.toString());
|
||||
}
|
||||
return selectedValue?.toString() || '';
|
||||
@@ -50,6 +74,7 @@ function VariableItem({
|
||||
existingVariables,
|
||||
onValueUpdate,
|
||||
variablesToGetUpdated,
|
||||
setVariablesToGetUpdated,
|
||||
}: VariableItemProps): JSX.Element {
|
||||
const [optionsData, setOptionsData] = useState<(string | number | boolean)[]>(
|
||||
[],
|
||||
@@ -148,6 +173,10 @@ function VariableItem({
|
||||
}
|
||||
|
||||
setOptionsData(newOptionsData);
|
||||
} else {
|
||||
setVariablesToGetUpdated((prev) =>
|
||||
prev.filter((name) => name !== variableData.name),
|
||||
);
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
@@ -193,7 +222,7 @@ function VariableItem({
|
||||
});
|
||||
|
||||
const handleChange = (value: string | string[]): void => {
|
||||
if (variableData.name)
|
||||
if (variableData.name) {
|
||||
if (
|
||||
value === ALL_SELECT_VALUE ||
|
||||
(Array.isArray(value) && value.includes(ALL_SELECT_VALUE)) ||
|
||||
@@ -203,25 +232,29 @@ function VariableItem({
|
||||
} else {
|
||||
onValueUpdate(variableData.name, variableData.id, value, false);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// do not debounce the above function as we do not need debounce in select variables
|
||||
const debouncedHandleChange = debounce(handleChange, 500);
|
||||
|
||||
const { selectedValue } = variableData;
|
||||
const selectedValueStringified = useMemo(() => getSelectValue(selectedValue), [
|
||||
selectedValue,
|
||||
]);
|
||||
const selectedValueStringified = useMemo(
|
||||
() => getSelectValue(selectedValue, variableData),
|
||||
[selectedValue, variableData],
|
||||
);
|
||||
|
||||
const selectValue = variableData.allSelected
|
||||
? 'ALL'
|
||||
: selectedValueStringified;
|
||||
const enableSelectAll = variableData.multiSelect && variableData.showALLOption;
|
||||
|
||||
const mode =
|
||||
const selectValue =
|
||||
variableData.allSelected && enableSelectAll
|
||||
? 'ALL'
|
||||
: selectedValueStringified;
|
||||
|
||||
const mode: 'multiple' | undefined =
|
||||
variableData.multiSelect && !variableData.allSelected
|
||||
? 'multiple'
|
||||
: undefined;
|
||||
const enableSelectAll = variableData.multiSelect && variableData.showALLOption;
|
||||
|
||||
useEffect(() => {
|
||||
// Fetch options for CUSTOM Type
|
||||
@@ -231,6 +264,117 @@ function VariableItem({
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [variableData.type, variableData.customValue]);
|
||||
|
||||
const checkAll = (e: MouseEvent): void => {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
const isChecked =
|
||||
variableData.allSelected || selectValue.includes(ALL_SELECT_VALUE);
|
||||
|
||||
if (isChecked) {
|
||||
handleChange([]);
|
||||
} else {
|
||||
handleChange(ALL_SELECT_VALUE);
|
||||
}
|
||||
};
|
||||
|
||||
const handleOptionSelect = (
|
||||
e: CheckboxChangeEvent,
|
||||
option: string | number | boolean,
|
||||
): void => {
|
||||
const newSelectedValue = Array.isArray(selectedValue)
|
||||
? ((selectedValue.filter(
|
||||
(val) => val.toString() !== option.toString(),
|
||||
) as unknown) as string[])
|
||||
: [];
|
||||
|
||||
if (
|
||||
!e.target.checked &&
|
||||
Array.isArray(selectedValueStringified) &&
|
||||
selectedValueStringified.includes(option.toString())
|
||||
) {
|
||||
if (newSelectedValue.length === 0) {
|
||||
handleChange(ALL_SELECT_VALUE);
|
||||
return;
|
||||
}
|
||||
if (newSelectedValue.length === 1) {
|
||||
handleChange(newSelectedValue[0].toString());
|
||||
return;
|
||||
}
|
||||
handleChange(newSelectedValue);
|
||||
} else if (!e.target.checked && selectedValue === option.toString()) {
|
||||
handleChange(ALL_SELECT_VALUE);
|
||||
} else if (newSelectedValue.length === optionsData.length - 1) {
|
||||
handleChange(ALL_SELECT_VALUE);
|
||||
}
|
||||
};
|
||||
|
||||
const [optionState, setOptionState] = useState({
|
||||
tag: '',
|
||||
visible: false,
|
||||
});
|
||||
|
||||
function currentToggleTagValue({
|
||||
option,
|
||||
}: {
|
||||
option: string;
|
||||
}): ToggleTagValue {
|
||||
if (
|
||||
option.toString() === selectValue ||
|
||||
(Array.isArray(selectValue) &&
|
||||
selectValue?.includes(option.toString()) &&
|
||||
selectValue.length === 1)
|
||||
) {
|
||||
return ToggleTagValue.All;
|
||||
}
|
||||
return ToggleTagValue.Only;
|
||||
}
|
||||
|
||||
function handleToggle(e: ChangeEvent, option: string): void {
|
||||
e.stopPropagation();
|
||||
const mode = currentToggleTagValue({ option: option as string });
|
||||
const isChecked =
|
||||
variableData.allSelected ||
|
||||
option.toString() === selectValue ||
|
||||
(Array.isArray(selectValue) && selectValue?.includes(option.toString()));
|
||||
|
||||
if (isChecked) {
|
||||
if (mode === ToggleTagValue.Only) {
|
||||
handleChange(option.toString());
|
||||
} else if (!variableData.multiSelect) {
|
||||
handleChange(option.toString());
|
||||
} else {
|
||||
handleChange(ALL_SELECT_VALUE);
|
||||
}
|
||||
} else {
|
||||
handleChange(option.toString());
|
||||
}
|
||||
}
|
||||
|
||||
function retProps(
|
||||
option: string,
|
||||
): {
|
||||
onMouseOver: () => void;
|
||||
onMouseOut: () => void;
|
||||
} {
|
||||
return {
|
||||
onMouseOver: (): void =>
|
||||
setOptionState({
|
||||
tag: option.toString(),
|
||||
visible: true,
|
||||
}),
|
||||
onMouseOut: (): void =>
|
||||
setOptionState({
|
||||
tag: option.toString(),
|
||||
visible: false,
|
||||
}),
|
||||
};
|
||||
}
|
||||
|
||||
const ensureValidOption = (option: string): boolean =>
|
||||
!(
|
||||
currentToggleTagValue({ option }) === ToggleTagValue.All && !enableSelectAll
|
||||
);
|
||||
|
||||
return (
|
||||
<div className="variable-item">
|
||||
<Typography.Text className="variable-name" ellipsis>
|
||||
@@ -264,19 +408,35 @@ function VariableItem({
|
||||
onChange={handleChange}
|
||||
bordered={false}
|
||||
placeholder="Select value"
|
||||
placement="bottomRight"
|
||||
placement="bottomLeft"
|
||||
mode={mode}
|
||||
dropdownMatchSelectWidth={false}
|
||||
style={SelectItemStyle}
|
||||
loading={isLoading}
|
||||
showSearch
|
||||
data-testid="variable-select"
|
||||
className="variable-select"
|
||||
popupClassName="dropdown-styles"
|
||||
maxTagCount={4}
|
||||
getPopupContainer={popupContainer}
|
||||
// eslint-disable-next-line react/no-unstable-nested-components
|
||||
tagRender={(props): JSX.Element => (
|
||||
<Tag closable onClose={props.onClose}>
|
||||
{props.value}
|
||||
</Tag>
|
||||
)}
|
||||
// eslint-disable-next-line react/no-unstable-nested-components
|
||||
maxTagPlaceholder={(omittedValues): JSX.Element => (
|
||||
<Tooltip title={omittedValues.map(({ value }) => value).join(', ')}>
|
||||
<span>+ {omittedValues.length} </span>
|
||||
</Tooltip>
|
||||
)}
|
||||
>
|
||||
{enableSelectAll && (
|
||||
<Select.Option data-testid="option-ALL" value={ALL_SELECT_VALUE}>
|
||||
ALL
|
||||
<div className="all-label" onClick={(e): void => checkAll(e as any)}>
|
||||
<Checkbox checked={variableData.allSelected} />
|
||||
ALL
|
||||
</div>
|
||||
</Select.Option>
|
||||
)}
|
||||
{map(optionsData, (option) => (
|
||||
@@ -285,7 +445,45 @@ function VariableItem({
|
||||
key={option.toString()}
|
||||
value={option}
|
||||
>
|
||||
{option.toString()}
|
||||
<div
|
||||
className={variableData.multiSelect ? 'dropdown-checkbox-label' : ''}
|
||||
>
|
||||
{variableData.multiSelect && (
|
||||
<Checkbox
|
||||
onChange={(e): void => {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
handleOptionSelect(e, option);
|
||||
}}
|
||||
checked={
|
||||
variableData.allSelected ||
|
||||
option.toString() === selectValue ||
|
||||
(Array.isArray(selectValue) &&
|
||||
selectValue?.includes(option.toString()))
|
||||
}
|
||||
/>
|
||||
)}
|
||||
<div
|
||||
className="dropdown-value"
|
||||
{...retProps(option as string)}
|
||||
onClick={(e): void => handleToggle(e as any, option as string)}
|
||||
>
|
||||
<Tooltip title={option.toString()} placement="bottomRight">
|
||||
<Typography.Text ellipsis className="option-text">
|
||||
{option.toString()}
|
||||
</Typography.Text>
|
||||
</Tooltip>
|
||||
|
||||
{variableData.multiSelect &&
|
||||
optionState.tag === option.toString() &&
|
||||
optionState.visible &&
|
||||
ensureValidOption(option as string) && (
|
||||
<Typography.Text className="toggle-tag-label">
|
||||
{currentToggleTagValue({ option: option as string })}
|
||||
</Typography.Text>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</Select.Option>
|
||||
))}
|
||||
</Select>
|
||||
|
||||
@@ -42,4 +42,5 @@ export const VariableValue = styled(Typography)`
|
||||
export const SelectItemStyle = {
|
||||
minWidth: 120,
|
||||
fontSize: '0.8rem',
|
||||
width: '100%',
|
||||
};
|
||||
|
||||
@@ -72,10 +72,16 @@ function LeftContainer({
|
||||
globalSelectedInterval,
|
||||
graphType: getGraphType(selectedGraph || selectedWidget.panelTypes),
|
||||
query: stagedQuery,
|
||||
fillGaps: selectedWidget.fillSpans || false,
|
||||
}));
|
||||
}
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [stagedQuery, selectedTime, globalSelectedInterval]);
|
||||
}, [
|
||||
stagedQuery,
|
||||
selectedTime,
|
||||
selectedWidget.fillSpans,
|
||||
globalSelectedInterval,
|
||||
]);
|
||||
|
||||
const queryResponse = useGetQueryRange(
|
||||
requestData,
|
||||
|
||||
@@ -429,6 +429,21 @@
|
||||
}
|
||||
}
|
||||
|
||||
.bucket-config {
|
||||
.label {
|
||||
color: var(--bg-ink-400);
|
||||
}
|
||||
|
||||
.bucket-input {
|
||||
border: 1px solid var(--bg-vanilla-300);
|
||||
background: var(--bg-vanilla-300);
|
||||
|
||||
.ant-input {
|
||||
background: var(--bg-vanilla-300);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.panel-time-text {
|
||||
color: var(--bg-ink-400);
|
||||
}
|
||||
|
||||
@@ -189,50 +189,6 @@ export const panelTypeDataSourceFormValuesMap: Record<
|
||||
},
|
||||
},
|
||||
},
|
||||
[PANEL_TYPES.HISTOGRAM]: {
|
||||
[DataSource.LOGS]: {
|
||||
builder: {
|
||||
queryData: [
|
||||
'filters',
|
||||
'aggregateOperator',
|
||||
'aggregateAttribute',
|
||||
'groupBy',
|
||||
'limit',
|
||||
'having',
|
||||
'orderBy',
|
||||
'functions',
|
||||
],
|
||||
},
|
||||
},
|
||||
[DataSource.METRICS]: {
|
||||
builder: {
|
||||
queryData: [
|
||||
'filters',
|
||||
'aggregateOperator',
|
||||
'aggregateAttribute',
|
||||
'groupBy',
|
||||
'limit',
|
||||
'having',
|
||||
'orderBy',
|
||||
'functions',
|
||||
'spaceAggregation',
|
||||
],
|
||||
},
|
||||
},
|
||||
[DataSource.TRACES]: {
|
||||
builder: {
|
||||
queryData: [
|
||||
'filters',
|
||||
'aggregateOperator',
|
||||
'aggregateAttribute',
|
||||
'groupBy',
|
||||
'limit',
|
||||
'having',
|
||||
'orderBy',
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
[PANEL_TYPES.TABLE]: {
|
||||
[DataSource.LOGS]: {
|
||||
builder: {
|
||||
|
||||
@@ -0,0 +1,111 @@
|
||||
## Setup
|
||||
|
||||
### Installing with OpenTelemetry Helm Charts
|
||||
|
||||
Prior to installation, you must ensure your Kubernetes cluster is ready and that you have the necessary permissions to deploy applications. Follow these steps to use Helm for setting up the Collector:
|
||||
|
||||
1. **Add the OpenTelemetry Helm repository:**
|
||||
|
||||
```bash
|
||||
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
|
||||
```
|
||||
|
||||
2. **Prepare the `otel-collector-values.yaml` Configuration**
|
||||
|
||||
#### Azure Event Hub Receiver Configuration
|
||||
If you haven't created the logs Event Hub, you can create one by following the steps in the [Azure Event Hubs documentation](../../bootstrapping/data-ingestion).
|
||||
|
||||
and replace the placeholders `<Primary Connection String>` with the primary connection string for your Event Hub, it should look something like this:
|
||||
|
||||
```yaml
|
||||
connection: Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
|
||||
```
|
||||
The Event Hub docs have a step to create a SAS policy for the event hub and copy the connection string.
|
||||
|
||||
#### Azure Monitor Receiver Configuration
|
||||
|
||||
You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor.
|
||||
|
||||
1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal.
|
||||
You can name it `signoz-central-collector-app`; the redirect URI can be empty.
|
||||
2. To add read permissions to Azure Monitor, follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. The read access can be given to the full subscription.
|
||||
3. There are multiple ways to authenticate the service principal, we will use the client secret option, follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`.
|
||||
|
||||
4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You would see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section.
|
||||
|
||||
<figure data-zoomable align="center">
|
||||
<img
|
||||
src="/img/docs/azure-monitoring/service-principal-app-overview.webp"
|
||||
alt="Application Overview"
|
||||
/>
|
||||
<figcaption>
|
||||
<i>
|
||||
Application Overview
|
||||
</i>
|
||||
</figcaption>
|
||||
</figure>
|
||||
|
||||
5. To find `subscription_id`, follow steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate them in the configuration file.
|
||||
|
||||
6. Ensure you replace the placeholders `<region>` and `<ingestion-key>` with the appropriate values for your SigNoz Cloud instance.
|
||||
|
||||
|
||||
|
||||
Below is an example targeting the SigNoz backend with Azure Monitor receivers configured:
|
||||
|
||||
```yaml
|
||||
service:
|
||||
pipelines:
|
||||
metrics/am:
|
||||
receivers: [azuremonitor]
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
logs:
|
||||
receivers: [otlp, azureeventhub]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
azureeventhub:
|
||||
connection: Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
|
||||
format: "azure"
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
client_id: "<Client ID>"
|
||||
client_secret: "<Client Secret>"
|
||||
resource_groups: ["<rg-1>"]
|
||||
collection_interval: 60s
|
||||
processors:
|
||||
batch: {}
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "ingest.<region>.signoz.cloud:443"
|
||||
tls:
|
||||
insecure: false
|
||||
headers:
|
||||
"signoz-access-token": "<ingestion-key>"
|
||||
```
|
||||
|
||||
3. **Deploy the OpenTelemetry Collector to your Kubernetes cluster:**
|
||||
|
||||
You'll need to prepare a custom configuration file, say `otel-collector-values.yaml`, that matches your environment's specific needs. Replace `<namespace>` with the Kubernetes namespace where you wish to install the Collector.
|
||||
|
||||
```bash
|
||||
helm install -n <namespace> --create-namespace otel-collector open-telemetry/opentelemetry-collector -f otel-collector-values.yaml
|
||||
|
||||
```
|
||||
|
||||
For more detail, refer to the [official OpenTelemetry Helm Chart documentation](https://github.com/open-telemetry/opentelemetry-helm-charts/tree/main/charts/opentelemetry-collector), which offers comprehensive installation instructions and configuration options tailored to your environment's requirements.
|
||||
@@ -0,0 +1,8 @@
|
||||
## Prerequisite
|
||||
|
||||
- An AKS cluster
|
||||
- Central Collector Setup
|
||||
|
||||
|
||||
|
||||
Once you have set up the Central Collector, it will automatically start collecting your logs.
|
||||
@@ -0,0 +1,8 @@
|
||||
## Prerequisite
|
||||
|
||||
- An AKS cluster
|
||||
- Central Collector Setup
|
||||
|
||||
|
||||
|
||||
Once you have set up the Central Collector, it will automatically start sending your metrics to SigNoz.
|
||||
@@ -0,0 +1,40 @@
|
||||
## Overview
|
||||
|
||||
Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises.
|
||||
|
||||
Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- An active Azure subscription
|
||||
|
||||
## Setup
|
||||
|
||||
### 1. Create an Event Hubs Namespace
|
||||
|
||||
1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace.
|
||||
2. Fill in the required details:
|
||||
- **Resource group**: Choose or create a new one.
|
||||
- **Namespace name**: Enter a unique name, e.g., `<orgName>-obs-signoz`.
|
||||
- **Pricing tier**: Based on your logging requirements.
|
||||
- **Region**: Should match the region of the resources you want to monitor.
|
||||
- **Throughput units**: Choose based on logging needs.
|
||||
3. Click "Review + create" and then "Create".
|
||||
|
||||
### 2. Create an Event Hub
|
||||
|
||||
1. Navigate to the Event Hubs namespace you created in the Azure portal.
|
||||
2. Click "+ Event Hub" to create a new event hub.
|
||||
3. Enter a name, e.g., `logs`, and click "Create".
|
||||
|
||||
### 3. Create a SAS Policy and Copy Connection String
|
||||
|
||||
1. Navigate to the Event Hub in the Azure portal.
|
||||
2. Click "Shared access policies" in the left menu.
|
||||
3. Click "Add" to create a new policy named `signozListen`.
|
||||
4. Select the "Listen" permission and set the expiration time.
|
||||
5. Click "Save".
|
||||
6. Copy the *Connection string–primary key*.
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,16 @@
|
||||
## Application level Tracing
|
||||
|
||||
For application-level tracing, you can use the OpenTelemetry SDKs integrated with your application. These SDKs will automatically collect and forward traces to the Central Collector.
|
||||
|
||||
|
||||
|
||||
To see how you can instrument your applications like FastAPI, NextJS, Node.js, Spring etc. you can check out the **Application Monitoring** section available at the start of this onboarding or you can checkout this [documentation](https://signoz.io/docs/instrumentation/).
|
||||
|
||||
## Configure the OpenTelemetry SDK
|
||||
|
||||
```bash
|
||||
# Set env vars or config file
|
||||
export OTEL_EXPORTER_OTLP_ENDPOINT="http://otel-collector.kubelet-otel.svc.cluster.local:4318/"
|
||||
```
|
||||
|
||||
For application-level traces and metrics, configure your application to use the `kube-dns` name of the **Central Collector** you set up earlier.
|
||||
@@ -0,0 +1,129 @@
|
||||
Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine. Here's how to do it:
|
||||
|
||||
|
||||
## Download and Install the OpenTelemetry Collector Binary
|
||||
|
||||
Please visit [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) which provides further guidance on a VM installation.
|
||||
|
||||
|
||||
|
||||
## Configure OpenTelemetry Collector
|
||||
|
||||
While following the documentation above for installing the OpenTelemetry Collector Binary, you must have created `config.yaml` file. Replace the content of the `config.yaml` with the below config file which includes the **Azure Monitor receiver**.
|
||||
|
||||
```yaml
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
client_id: "<Client ID>"
|
||||
client_secret: "<Client Secret>"
|
||||
resource_groups: ["<rg-1>"]
|
||||
collection_interval: 60s
|
||||
processors:
|
||||
batch: {}
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
|
||||
tls:
|
||||
insecure: false
|
||||
headers:
|
||||
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
|
||||
service:
|
||||
pipelines:
|
||||
metrics/am:
|
||||
receivers: [azuremonitor]
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
logs:
|
||||
receivers: [otlp, azureeventhub]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
|
||||
```
|
||||
**NOTE:**
|
||||
Replace the `<Primary Connection String>` in the config file with the primary connection string for your Event Hub that you created in the previous section. It would look something like this:
|
||||
|
||||
```bash
|
||||
Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Azure Monitor Receiver Configuration
|
||||
|
||||
You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor.
|
||||
|
||||
1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal.
|
||||
You can name it `signoz-central-collector-app`; the redirect URI can be empty.
|
||||
|
||||
2. To add read permissions to Azure Monitor, Follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. The read access can be given to the full subscription.
|
||||
|
||||
3. There are multiple ways to authenticate the service principal, we will use the client secret option, follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`.
|
||||
|
||||
4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You would see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section.
|
||||
|
||||
5. To find `subscription_id`, follow steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate them in the configuration file.
|
||||
|
||||
**NOTE:**
|
||||
By following the above steps, you will get the values for `<Subscription ID>`, `<AD Tenant ID>`, `<Client ID>` and `<Client Secret>` which you need to fill in the `config.yaml` file.
|
||||
|
||||
|
||||
|
||||
## Run the Collector
|
||||
|
||||
With your configuration file ready, you can now start the Collector using the following command:
|
||||
|
||||
```bash
|
||||
# Runs in background with the configuration we just created
|
||||
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Open Ports
|
||||
|
||||
You will need to open the following ports on your Azure VM:
|
||||
- 4317 for gRPC
|
||||
- 4318 for HTTP
|
||||
|
||||
You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports.
|
||||
|
||||
|
||||
|
||||
### Validating the Deployment
|
||||
|
||||
Once the Collector is running, ensure that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors.
|
||||
|
||||
|
||||
|
||||
## Configure DNS label For Collector
|
||||
|
||||
To make the collector easier to reference from other services, you can add a DNS label to its Public IP address. You can do this by following these steps:
|
||||
|
||||
1. Go to the Public IP address of the collector. This would be the IP address of the VM or Load Balancer in case of Kubernetes or Load Balanced collector.
|
||||
2. Click on the "Configuration" tab.
|
||||
3. Enter the DNS label you want to use for the collector.
|
||||
4. Click on "Save".
|
||||
|
||||
**NOTE:** Please take note of the DNS label you have entered. You will need this in the next steps.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting)
|
||||
@@ -0,0 +1,33 @@
|
||||
Follow these steps if you want to set up logging for your Azure App Service.
|
||||
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- EventHub Setup
|
||||
- Central Collector Setup
|
||||
|
||||
|
||||
## Setup
|
||||
|
||||
1. Navigate to your App Service in the Azure portal
|
||||
|
||||
2. Search for "Diagnostic settings" in the left navigation menu
|
||||
|
||||
3. Click on "Add Diagnostic Setting"
|
||||
|
||||
4. Select the desired log categories to export:
|
||||
- HTTP logs
|
||||
- App Service Console Logs
|
||||
- App Service Application Logs
|
||||
- Access Audit Logs
|
||||
- IPSecurity Audit logs
|
||||
- App Service Platform logs
|
||||
|
||||
|
||||
5. Configure the destination details as **"Stream to an Event Hub"** and select the Event Hub namespace and Event Hub name created during the EventHub Setup in the earlier steps.
|
||||
|
||||
6. Save the diagnostic settings
|
||||
|
||||
|
||||
This will start sending your Azure App Service Logs to SigNoz!
|
||||
@@ -0,0 +1,25 @@
|
||||
Follow these steps if you want to monitor System metrics like CPU Percentage, Memory Percentage etc. of your Azure App Service.
|
||||
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- EventHub Setup
|
||||
- Central Collector Setup
|
||||
|
||||
## Dashboard Example
|
||||
|
||||
Once you have completed the prerequisites, you can start monitoring your Azure App Service's system metrics with SigNoz Cloud. Here's how you can do it:
|
||||
|
||||
1. Log in to your SigNoz account
|
||||
2. Navigate to the Dashboards section, and [add a dashboard](https://signoz.io/docs/userguide/manage-dashboards/)
|
||||
3. Add a Timeseries Panel
|
||||
4. In **Metrics**, select `azure_memorypercentage_total` and **Avg By** select tag `location`
|
||||
5. In Filter say `name = <app-svc-plan-name>`
|
||||
6. Hit “Save Changes” and you now have Memory Usage of your App Service in a Dashboard for reporting and alerting
|
||||
|
||||
In this way, you can monitor system metrics of your Azure App Service in SigNoz Cloud.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/app-service/metrics/#troubleshooting)
|
||||
@@ -0,0 +1,54 @@
|
||||
## Overview
|
||||
|
||||
Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises.
|
||||
|
||||
Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- An active Azure subscription
|
||||
|
||||
## Setup
|
||||
|
||||
### 1. Create an Event Hubs Namespace
|
||||
|
||||
1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace.
|
||||
2. Fill in the required details:
|
||||
- **Resource group**: Choose or create a new one.
|
||||
- **Namespace name**: Enter a unique name, e.g., `<orgName>-obs-signoz`.
|
||||
- **Pricing tier**: Based on your logging requirements.
|
||||
- **Region**: Should match the region of the resources you want to monitor.
|
||||
- **Throughput units**: Choose based on logging needs.
|
||||
3. Click "Review + create" and then "Create".
|
||||
|
||||
### 2. Create an Event Hub
|
||||
|
||||
1. Navigate to the Event Hubs namespace you created in the Azure portal.
|
||||
2. Click "+ Event Hub" to create a new event hub.
|
||||
3. Enter a name, e.g., `logs`, and click "Create"
|
||||
|
||||
### 3. Create a SAS Policy and Copy Connection String
|
||||
|
||||
1. Navigate to the Event Hub in the Azure portal.
|
||||
2. Click "Shared access policies" in the left menu.
|
||||
3. Click "Add" to create a new policy named `signozListen`.
|
||||
4. Select the "Listen" permission and set the expiration time.
|
||||
5. Click "Save".
|
||||
6. Copy the *Connection string–primary key*.
|
||||
|
||||
<!-- ### 4. Configure OpenTelemetry Integration
|
||||
|
||||
1. Add a new receiver to [Central Collector Setup](../collector-setup).
|
||||
2. Configure the receiver with the Event Hubs namespace connection string and the event hub name.
|
||||
|
||||
### 5. Stream Logs to Event Hubs
|
||||
|
||||
1. Configure Azure services' diagnostic settings to forward logs to the Event Hub.
|
||||
2. Ensure logs are in [Azure Common Log Format](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema).
|
||||
3. Verify logs are streaming to Event Hubs and received by SigNoz.
|
||||
|
||||
For detailed instructions, refer to the Azure documentation: [Azure Event Hub](https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-create). -->
|
||||
|
||||
<!-- For more configuration options, see the [OpenTelemetry Documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/azureeventhubreceiver). -->
|
||||
|
||||
|
||||
@@ -0,0 +1,29 @@
|
||||
## Application level Tracing
|
||||
|
||||
For application-level tracing, you can use the OpenTelemetry SDKs integrated with your application. These SDKs will automatically collect and forward traces to the Central Collector.
|
||||
|
||||
|
||||
|
||||
To see how you can instrument your applications like FastAPI, NextJS, Node.js, Spring etc. you can check out the **Application Monitoring** section available at the start of this onboarding or you can checkout this [documentation](https://signoz.io/docs/instrumentation/).
|
||||
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. **Azure Subscription & App Service**: You need an active Azure subscription with a running Azure App Service instance.
|
||||
2. **Central Collector Setup**: Make sure you have set up the Central Collector
|
||||
|
||||
|
||||
|
||||
## Configure the OpenTelemetry SDK
|
||||
|
||||
```bash
|
||||
# Set env vars or config file
|
||||
export OTEL_EXPORTER_OTLP_ENDPOINT="http://<Your-Central-Collector-DNS>:4318/"
|
||||
```
|
||||
|
||||
For application-level traces, configure your application to use the DNS name of the **Central Collector** you set up earlier. This Central Collector will automatically forward the collected data to SigNoz.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/app-service/tracing/#troubleshooting)
|
||||
@@ -0,0 +1,129 @@
|
||||
Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine. Here's how to do it:
|
||||
|
||||
|
||||
## Download and Install the OpenTelemetry Collector Binary
|
||||
|
||||
Please visit [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) which provides further guidance on a VM installation.
|
||||
|
||||
|
||||
|
||||
## Configure OpenTelemetry Collector
|
||||
|
||||
While following the documentation above for installing the OpenTelemetry Collector Binary, you must have created `config.yaml` file. Replace the content of the `config.yaml` with the below config file which includes the **Azure Monitor receiver**.
|
||||
|
||||
```yaml
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
client_id: "<Client ID>"
|
||||
client_secret: "<Client Secret>"
|
||||
resource_groups: ["<rg-1>"]
|
||||
collection_interval: 60s
|
||||
processors:
|
||||
batch: {}
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
|
||||
tls:
|
||||
insecure: false
|
||||
headers:
|
||||
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
|
||||
service:
|
||||
pipelines:
|
||||
metrics/am:
|
||||
receivers: [azuremonitor]
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
logs:
|
||||
receivers: [otlp, azureeventhub]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
|
||||
```
|
||||
**NOTE:**
|
||||
Replace the `<Primary Connection String>` in the config file with the primary connection string for your Event Hub that you created in the previous section. It would look something like this:
|
||||
|
||||
```bash
|
||||
Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Azure Monitor Receiver Configuration
|
||||
|
||||
You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor.
|
||||
|
||||
1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal.
|
||||
You can name it `signoz-central-collector-app`; the redirect URI can be empty.
|
||||
|
||||
2. To add read permissions to Azure Monitor, Follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. The read access can be given to the full subscription.
|
||||
|
||||
3. There are multiple ways to authenticate the service principal, we will use the client secret option, follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`.
|
||||
|
||||
4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You would see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section.
|
||||
|
||||
5. To find `subscription_id`, follow steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate them in the configuration file.
|
||||
|
||||
**NOTE:**
|
||||
By following the above steps, you will get the values for `<Subscription ID>`, `<AD Tenant ID>`, `<Client ID>` and `<Client Secret>` which you need to fill in the `config.yaml` file.
|
||||
|
||||
|
||||
|
||||
## Run the Collector
|
||||
|
||||
With your configuration file ready, you can now start the Collector using the following command:
|
||||
|
||||
```bash
|
||||
# Runs in background with the configuration we just created
|
||||
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Open Ports
|
||||
|
||||
You will need to open the following ports on your Azure VM:
|
||||
- 4317 for gRPC
|
||||
- 4318 for HTTP
|
||||
|
||||
You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports.
|
||||
|
||||
|
||||
|
||||
### Validating the Deployment
|
||||
|
||||
Once the Collector is running, ensure that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors.
|
||||
|
||||
|
||||
|
||||
## Configure DNS label For Collector
|
||||
|
||||
To the IP address of the collector, you can add a DNS label to the Public IP address. This will make it easier to refer to the centralized collector from other services. You can do this by following these steps:
|
||||
|
||||
1. Go to the Public IP address of the collector. This would be the IP address of the VM or Load Balancer in case of Kubernetes or Load Balanced collector.
|
||||
2. Click on the "Configuration" tab.
|
||||
3. Enter the DNS label you want to use for the collector.
|
||||
4. Click on "Save".
|
||||
|
||||
**NOTE:** Please take note of the DNS label you have entered. You will need this in the next steps.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting)
|
||||
@@ -0,0 +1,23 @@
|
||||
Follow these steps if you want to set up logging for your Azure Blob Storage.
|
||||
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- EventHub Setup
|
||||
- Central Collector Setup
|
||||
|
||||
## Setup
|
||||
|
||||
1. Navigate to the relevant Storage Account in the Azure portal
|
||||
2. Search for "Diagnostic settings" in the left navigation menu
|
||||
3. Click on `blob` under the storage account
|
||||
4. Click on "Add Diagnostic Setting"
|
||||
5. Select the desired log categories to export:
|
||||
- Storage Read
|
||||
- Storage Write
|
||||
- Storage Delete
|
||||
6. Configure the destination details as "**Stream to an Event Hub**" and select the Event Hub namespace and Event Hub name created during the EventHub Setup
|
||||
7. Save the diagnostic settings
|
||||
|
||||
That's it! You have successfully set up logging for your Azure Blob Storage.
|
||||
@@ -0,0 +1,28 @@
|
||||
Follow these steps if you want to monitor system metrics like Total Requests, Total Ingress / Egress, and Total Errors etc., of your Azure Blob Storage.
|
||||
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Azure Subscription and Azure Blob storage instance running
|
||||
- Central Collector Setup
|
||||
|
||||
|
||||
|
||||
## Dashboard Example
|
||||
|
||||
Once you have completed the prerequisites, you can start monitoring your Azure Blob Storage's system metrics with SigNoz.
|
||||
|
||||
1. Log in to your SigNoz account.
|
||||
2. Navigate to the Dashboards, and [add a dashboard](https://signoz.io/docs/userguide/manage-dashboards/)
|
||||
3. Add a Timeseries Panel
|
||||
4. In **Metrics**, select `azure_ingress_total` and **Avg By** select tag `location`
|
||||
5. In Filter say `name = <storage-account-name>`
|
||||
6. Hit “Save Changes”. You now have Total Ingress of your Azure Blob Storage in a Dashboard for reporting and alerting
|
||||
|
||||
|
||||
That's it! You have successfully set up monitoring for your Azure Blob Storage's system metrics with SigNoz. You can now start creating other panels and dashboards to monitor other Azure Blob Storage's metrics.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/az-blob-storage/metrics/#troubleshooting)
|
||||
@@ -0,0 +1,54 @@
|
||||
## Overview
|
||||
|
||||
Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises.
|
||||
|
||||
Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- An active Azure subscription
|
||||
|
||||
## Setup
|
||||
|
||||
### 1. Create an Event Hubs Namespace
|
||||
|
||||
1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace.
|
||||
2. Fill in the required details:
|
||||
- **Resource group**: Choose or create a new one.
|
||||
- **Namespace name**: Enter a unique name, e.g., `<orgName>-obs-signoz`.
|
||||
- **Pricing tier**: Based on your logging requirements.
|
||||
- **Region**: Should match the region of the resources you want to monitor.
|
||||
- **Throughput units**: Choose based on logging needs.
|
||||
3. Click "Review + create" and then "Create".
|
||||
|
||||
### 2. Create an Event Hub
|
||||
|
||||
1. Navigate to the Event Hubs namespace you created in the Azure portal.
|
||||
2. Click "+ Event Hub" to create a new event hub.
|
||||
3. Enter a name, e.g., `logs`, and click "Create"
|
||||
|
||||
### 3. Create a SAS Policy and Copy Connection String
|
||||
|
||||
1. Navigate to the Event Hub in the Azure portal.
|
||||
2. Click "Shared access policies" in the left menu.
|
||||
3. Click "Add" to create a new policy named `signozListen`.
|
||||
4. Select the "Listen" permission and set the expiration time.
|
||||
5. Click "Save".
|
||||
6. Copy the *Connection string–primary key*.
|
||||
|
||||
<!-- ### 4. Configure OpenTelemetry Integration
|
||||
|
||||
1. Add a new receiver to [Central Collector Setup](../collector-setup).
|
||||
2. Configure the receiver with the Event Hubs namespace connection string and the event hub name.
|
||||
|
||||
### 5. Stream Logs to Event Hubs
|
||||
|
||||
1. Configure Azure services' diagnostic settings to forward logs to the Event Hub.
|
||||
2. Ensure logs are in [Azure Common Log Format](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema).
|
||||
3. Verify logs are streaming to Event Hubs and received by SigNoz.
|
||||
|
||||
For detailed instructions, refer to the Azure documentation: [Azure Event Hub](https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-create). -->
|
||||
|
||||
<!-- For more configuration options, see the [OpenTelemetry Documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/azureeventhubreceiver). -->
|
||||
|
||||
|
||||
@@ -0,0 +1,129 @@
|
||||
Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine. Here's how to do it:
|
||||
|
||||
|
||||
## Download and Install the OpenTelemetry Collector Binary
|
||||
|
||||
Please visit [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) which provides further guidance on a VM installation.
|
||||
|
||||
|
||||
|
||||
## Configure OpenTelemetry Collector
|
||||
|
||||
While following the documentation above for installing the OpenTelemetry Collector Binary, you must have created `config.yaml` file. Replace the content of the `config.yaml` with the below config file which includes the **Azure Monitor receiver**.
|
||||
|
||||
```yaml
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
client_id: "<Client ID>"
|
||||
client_secret: "<Client Secret>"
|
||||
resource_groups: ["<rg-1>"]
|
||||
collection_interval: 60s
|
||||
processors:
|
||||
batch: {}
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
|
||||
tls:
|
||||
insecure: false
|
||||
headers:
|
||||
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
|
||||
service:
|
||||
pipelines:
|
||||
metrics/am:
|
||||
receivers: [azuremonitor]
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
logs:
|
||||
receivers: [otlp, azureeventhub]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
|
||||
```
|
||||
**NOTE:**
|
||||
Replace the `<Primary Connection String>` in the config file with the primary connection string for your Event Hub that you created in the previous section. It would look something like this:
|
||||
|
||||
```bash
|
||||
Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Azure Monitor Receiver Configuration
|
||||
|
||||
You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor.
|
||||
|
||||
1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal.
|
||||
You can name it `signoz-central-collector-app`; the redirect URI can be empty.
|
||||
|
||||
2. To add read permissions to Azure Monitor, Follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. The read access can be given to the full subscription.
|
||||
|
||||
3. There are multiple ways to authenticate the service principal, we will use the client secret option, follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`.
|
||||
|
||||
4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You would see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section.
|
||||
|
||||
5. To find `subscription_id`, follow steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate them in the configuration file.
|
||||
|
||||
**NOTE:**
|
||||
By following the above steps, you will get the values for `<Subscription ID>`, `<AD Tenant ID>`, `<Client ID>` and `<Client Secret>` which you need to fill in the `config.yaml` file.
|
||||
|
||||
|
||||
|
||||
## Run the Collector
|
||||
|
||||
With your configuration file ready, you can now start the Collector using the following command:
|
||||
|
||||
```bash
|
||||
# Runs in background with the configuration we just created
|
||||
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Open Ports
|
||||
|
||||
You will need to open the following ports on your Azure VM:
|
||||
- 4317 for gRPC
|
||||
- 4318 for HTTP
|
||||
|
||||
You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports.
|
||||
|
||||
|
||||
|
||||
### Validating the Deployment
|
||||
|
||||
Once the Collector is running, ensure that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors.
|
||||
|
||||
|
||||
|
||||
## Configure DNS label For Collector
|
||||
|
||||
To make it easier to refer to the IP address of the collector, you can add a DNS label to its Public IP address. This will make it easier to refer to the centralized collector from other services. You can do this by following these steps:
|
||||
|
||||
1. Go to the Public IP address of the collector. This would be the IP address of the VM or Load Balancer in case of Kubernetes or Load Balanced collector.
|
||||
2. Click on the "Configuration" tab.
|
||||
3. Enter the DNS label you want to use for the collector.
|
||||
4. Click on "Save".
|
||||
|
||||
**NOTE:** Please take note of the DNS label you have entered. You will need this in the next steps.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting)
|
||||
@@ -0,0 +1,28 @@
|
||||
Follow these steps if you want to set up logging for your Azure Container App.
|
||||
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- EventHub Setup
|
||||
- Central Collector Setup
|
||||
|
||||
|
||||
## Setup
|
||||
|
||||
1. Navigate to your Container Apps in the Azure portal
|
||||
2. Click on "Container Apps Environment" to open the Container Apps Environment
|
||||
3. Search for "Diagnostic settings" in the left navigation menu
|
||||
4. Click on "Add Diagnostic Setting"
|
||||
5. Select the desired log categories to export:
|
||||
- Container App console logs
|
||||
- Container App system logs
|
||||
- Spring App console logs
|
||||
|
||||
|
||||
6. Configure the destination details as **"Stream to an Event Hub"** and select the Event Hub namespace and Event Hub name created during the EventHub Setup.
|
||||
|
||||
7. Save the diagnostic settings
|
||||
|
||||
|
||||
That's it! You have successfully set up logging for your Azure Container App.
|
||||
@@ -0,0 +1,27 @@
|
||||
Follow these steps if you want to monitor System metrics like CPU Percentage, Memory Percentage etc. of your Azure Container App.
|
||||
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Azure subscription and an Azure Container App instance running
|
||||
- Central Collector Setup
|
||||
|
||||
|
||||
|
||||
## Dashboard Example
|
||||
|
||||
Once you have completed the prerequisites, you can start monitoring your Azure Container App's system metrics with SigNoz. Here's how you can do it:
|
||||
|
||||
1. Log in to your SigNoz account.
|
||||
2. Navigate to the Dashboards, and [add a dashboard](https://signoz.io/docs/userguide/manage-dashboards/)
|
||||
3. Add a Timeseries Panel
|
||||
4. In **Metrics**, select `azure_replicas_count` and **Avg By** select tag `name`
|
||||
5. In Filter say `type = Microsoft.App/containerApps`
|
||||
6. Hit “Save Changes”. You now have Memory Usage of your Container App in a Dashboard for reporting and alerting
|
||||
|
||||
In this way, you can monitor system metrics of your Azure Container App in SigNoz!
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/az-container-apps/metrics/#troubleshooting)
|
||||
@@ -0,0 +1,54 @@
|
||||
## Overview
|
||||
|
||||
Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises.
|
||||
|
||||
Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- An active Azure subscription
|
||||
|
||||
## Setup
|
||||
|
||||
### 1. Create an Event Hubs Namespace
|
||||
|
||||
1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace.
|
||||
2. Fill in the required details:
|
||||
- **Resource group**: Choose or create a new one.
|
||||
- **Namespace name**: Enter a unique name, e.g., `<orgName>-obs-signoz`.
|
||||
- **Pricing tier**: Based on your logging requirements.
|
||||
- **Region**: Should match the region of the resources you want to monitor.
|
||||
- **Throughput units**: Choose based on logging needs.
|
||||
3. Click "Review + create" and then "Create".
|
||||
|
||||
### 2. Create an Event Hub
|
||||
|
||||
1. Navigate to the Event Hubs namespace you created in the Azure portal.
|
||||
2. Click "+ Event Hub" to create a new event hub.
|
||||
3. Enter a name, e.g., `logs`, and click "Create"
|
||||
|
||||
### 3. Create a SAS Policy and Copy Connection String
|
||||
|
||||
1. Navigate to the Event Hub in the Azure portal.
|
||||
2. Click "Shared access policies" in the left menu.
|
||||
3. Click "Add" to create a new policy named `signozListen`.
|
||||
4. Select the "Listen" permission and set the expiration time.
|
||||
5. Click "Save".
|
||||
6. Copy the *Connection string–primary key*.
|
||||
|
||||
<!-- ### 4. Configure OpenTelemetry Integration
|
||||
|
||||
1. Add a new receiver to [Central Collector Setup](../collector-setup).
|
||||
2. Configure the receiver with the Event Hubs namespace connection string and the event hub name.
|
||||
|
||||
### 5. Stream Logs to Event Hubs
|
||||
|
||||
1. Configure Azure services' diagnostic settings to forward logs to the Event Hub.
|
||||
2. Ensure logs are in [Azure Common Log Format](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema).
|
||||
3. Verify logs are streaming to Event Hubs and received by SigNoz.
|
||||
|
||||
For detailed instructions, refer to the Azure documentation: [Azure Event Hub](https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-create). -->
|
||||
|
||||
<!-- For more configuration options, see the [OpenTelemetry Documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/azureeventhubreceiver). -->
|
||||
|
||||
|
||||
@@ -0,0 +1,29 @@
|
||||
## Application level Tracing
|
||||
|
||||
For application-level tracing, you can use the OpenTelemetry SDKs integrated with your application. These SDKs will automatically collect and forward traces to the Central Collector.
|
||||
|
||||
|
||||
|
||||
To see how you can instrument your applications like FastAPI, NextJS, Node.js, Spring etc. you can check out the **Application Monitoring** section available at the start of this onboarding or you can checkout this [documentation](https://signoz.io/docs/instrumentation/).
|
||||
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. **Azure Subscription & App Service**: You need an active Azure subscription with a running Azure App Service instance.
|
||||
2. **Central Collector Setup**: Make sure you have set up the Central Collector
|
||||
|
||||
|
||||
|
||||
## Configure the OpenTelemetry SDK
|
||||
|
||||
```bash
|
||||
# Set env vars or config file
|
||||
export OTEL_EXPORTER_OTLP_ENDPOINT="http://<Your-Central-Collector-DNS>:4318/"
|
||||
```
|
||||
|
||||
For application-level traces, configure your application to use the DNS name of the **Central Collector** you set up earlier. This Central Collector will automatically forward the collected data to SigNoz.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/az-container-apps/tracing/#troubleshooting)
|
||||
@@ -0,0 +1,129 @@
|
||||
Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine. Here's how to do it:
|
||||
|
||||
|
||||
## Download and Install the OpenTelemetry Collector Binary
|
||||
|
||||
Please visit [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) which provides further guidance on a VM installation.
|
||||
|
||||
|
||||
|
||||
## Configure OpenTelemetry Collector
|
||||
|
||||
While following the documentation above for installing the OpenTelemetry Collector Binary, you must have created `config.yaml` file. Replace the content of the `config.yaml` with the below config file which includes the **Azure Monitor receiver**.
|
||||
|
||||
```yaml
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
client_id: "<Client ID>"
|
||||
client_secret: "<Client Secret>"
|
||||
resource_groups: ["<rg-1>"]
|
||||
collection_interval: 60s
|
||||
processors:
|
||||
batch: {}
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
|
||||
tls:
|
||||
insecure: false
|
||||
headers:
|
||||
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
|
||||
service:
|
||||
pipelines:
|
||||
metrics/am:
|
||||
receivers: [azuremonitor]
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
logs:
|
||||
receivers: [otlp, azureeventhub]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
|
||||
```
|
||||
**NOTE:**
|
||||
Replace the `<Primary Connection String>` in the config file with the primary connection string for your Event Hub that you created in the previous section. It would look something like this:
|
||||
|
||||
```bash
|
||||
Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Azure Monitor Receiver Configuration
|
||||
|
||||
You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor.
|
||||
|
||||
1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal.
|
||||
You can name it `signoz-central-collector-app`. The redirect URI can be left empty.
|
||||
|
||||
2. To add read permissions to Azure Monitor, follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. The read access can be given to the full subscription.
|
||||
|
||||
3. There are multiple ways to authenticate the service principal, we will use the client secret option, follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`.
|
||||
|
||||
4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You would see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section.
|
||||
|
||||
5. To find `subscription_id`, follow steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate them in the configuration file.
|
||||
|
||||
**NOTE:**
|
||||
By following the above steps, you will get the values for `<Subscription ID>`, `<AD Tenant ID>`, `<Client ID>` and `<Client Secret>` which you need to fill in the `config.yaml` file.
|
||||
|
||||
|
||||
|
||||
## Run the Collector
|
||||
|
||||
With your configuration file ready, you can now start the Collector using the following command:
|
||||
|
||||
```bash
|
||||
# Runs in background with the configuration we just created
|
||||
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Open Ports
|
||||
|
||||
You will need to open the following ports on your Azure VM:
|
||||
- 4317 for gRPC
|
||||
- 4318 for HTTP
|
||||
|
||||
You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports.
|
||||
|
||||
|
||||
|
||||
### Validating the Deployment
|
||||
|
||||
Once the Collector is running, ensure that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors.
|
||||
|
||||
|
||||
|
||||
## Configure DNS label For Collector
|
||||
|
||||
You can add a DNS label to the Public IP address of the collector. This will make it easier to refer to the centralized collector from other services. You can do this by following these steps:
|
||||
|
||||
1. Go to the Public IP address of the collector. This would be the IP address of the VM or Load Balancer in case of Kubernetes or Load Balanced collector.
|
||||
2. Click on the "Configuration" tab.
|
||||
3. Enter the DNS label you want to use for the collector.
|
||||
4. Click on "Save".
|
||||
|
||||
**NOTE:** Please take note of the DNS label you have entered. You will need this in the next steps.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting)
|
||||
@@ -0,0 +1,21 @@
|
||||
Follow these steps if you want to set up logging for your Azure Functions.
|
||||
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- EventHub Setup
|
||||
- Central Collector Setup
|
||||
|
||||
|
||||
## Setup
|
||||
|
||||
1. Navigate to your Azure Function in the Azure portal
|
||||
2. Search for "Diagnostic settings" in the left navigation menu
|
||||
3. Click on "Add Diagnostic Setting"
|
||||
4. Select the desired log categories to export:
|
||||
- Function App logs
|
||||
5. Configure the destination details as "**Stream to an Event Hub**" and select the Event Hub namespace and Event Hub name created during the EventHub Setup
|
||||
6. Save the diagnostic settings
|
||||
|
||||
That's it! You have successfully set up logging for your Azure Function.
|
||||
@@ -0,0 +1,28 @@
|
||||
Follow these steps if you want to monitor System metrics like CPU Percentage, Memory Percentage etc. of your Azure Functions.
|
||||
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Azure subscription and an Azure Function App instance running
|
||||
- Central Collector Setup
|
||||
|
||||
|
||||
|
||||
## Dashboard Example
|
||||
|
||||
Once you have completed the prerequisites, you can start monitoring your Azure Function's system metrics with SigNoz. Here's how you can do it:
|
||||
|
||||
1. Log in to your SigNoz account.
|
||||
2. Navigate to the Dashboards section, and add a dashboard
|
||||
3. Add a Timeseries Panel
|
||||
4. In *Metrics*, select `azure_requests_total`, and in *Avg By*, select the tag `location`
|
||||
5. In Filter say `name = <function-name>`
|
||||
6. Hit “Save Changes”. You now have Total Requests of your Azure Function in a Dashboard for reporting and alerting
|
||||
|
||||
|
||||
That's it! You have successfully set up monitoring for your Azure Function's system metrics with SigNoz.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/az-fns/metrics/#troubleshooting)
|
||||
@@ -0,0 +1,54 @@
|
||||
## Overview
|
||||
|
||||
Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises.
|
||||
|
||||
Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- An active Azure subscription
|
||||
|
||||
## Setup
|
||||
|
||||
### 1. Create an Event Hubs Namespace
|
||||
|
||||
1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace.
|
||||
2. Fill in the required details:
|
||||
- **Resource group**: Choose or create a new one.
|
||||
- **Namespace name**: Enter a unique name, e.g., `<orgName>-obs-signoz`.
|
||||
- **Pricing tier**: Based on your logging requirements.
|
||||
- **Region**: Should match the region of the resources you want to monitor.
|
||||
- **Throughput units**: Choose based on logging needs.
|
||||
3. Click "Review + create" and then "Create".
|
||||
|
||||
### 2. Create an Event Hub
|
||||
|
||||
1. Navigate to the Event Hubs namespace you created in the Azure portal.
|
||||
2. Click "+ Event Hub" to create a new event hub.
|
||||
3. Enter a name, e.g., `logs`, and click "Create"
|
||||
|
||||
### 3. Create a SAS Policy and Copy Connection String
|
||||
|
||||
1. Navigate to the Event Hub in the Azure portal.
|
||||
2. Click "Shared access policies" in the left menu.
|
||||
3. Click "Add" to create a new policy named `signozListen`.
|
||||
4. Select the "Listen" permission and set the expiration time.
|
||||
5. Click "Save".
|
||||
6. Copy the *Connection string–primary key*.
|
||||
|
||||
<!-- ### 4. Configure OpenTelemetry Integration
|
||||
|
||||
1. Add a new receiver to [Central Collector Setup](../collector-setup).
|
||||
2. Configure the receiver with the Event Hubs namespace connection string and the event hub name.
|
||||
|
||||
### 5. Stream Logs to Event Hubs
|
||||
|
||||
1. Configure Azure services' diagnostic settings to forward logs to the Event Hub.
|
||||
2. Ensure logs are in [Azure Common Log Format](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema).
|
||||
3. Verify logs are streaming to Event Hubs and received by SigNoz.
|
||||
|
||||
For detailed instructions, refer to the Azure documentation: [Azure Event Hub](https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-create). -->
|
||||
|
||||
<!-- For more configuration options, see the [OpenTelemetry Documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/azureeventhubreceiver). -->
|
||||
|
||||
|
||||
@@ -0,0 +1,29 @@
|
||||
## Application level Tracing
|
||||
|
||||
For application-level tracing, you can use the OpenTelemetry SDKs integrated with your application. These SDKs will automatically collect and forward traces to the Central Collector.
|
||||
|
||||
|
||||
|
||||
To see how you can instrument your applications like FastAPI, NextJS, Node.js, Spring etc. you can check out the **Application Monitoring** section available at the start of this onboarding or you can checkout this [documentation](https://signoz.io/docs/instrumentation/).
|
||||
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. **Azure Subscription & App Service**: You need an active Azure subscription with a running Azure Function App instance.
|
||||
2. **Central Collector Setup**: Make sure you have set up the Central Collector
|
||||
|
||||
|
||||
|
||||
## Configure the OpenTelemetry SDK
|
||||
|
||||
```bash
|
||||
# Set env vars or config file
|
||||
export OTEL_EXPORTER_OTLP_ENDPOINT="http://<Your-Central-Collector-DNS>:4318/"
|
||||
```
|
||||
|
||||
For application-level traces, configure your application to use the DNS name of the **Central Collector** you set up earlier. This Central Collector will automatically forward the collected data to SigNoz.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/az-fns/tracing/#troubleshooting)
|
||||
@@ -0,0 +1,129 @@
|
||||
Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine. Here's how to do it:
|
||||
|
||||
|
||||
## Download and Install the OpenTelemetry Collector Binary
|
||||
|
||||
Please visit [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) which provides further guidance on a VM installation.
|
||||
|
||||
|
||||
|
||||
## Configure OpenTelemetry Collector
|
||||
|
||||
While following the documentation above for installing the OpenTelemetry Collector Binary, you must have created `config.yaml` file. Replace the content of the `config.yaml` with the below config file which includes the **Azure Monitor receiver**.
|
||||
|
||||
```yaml
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
client_id: "<Client ID>"
|
||||
client_secret: "<Client Secret>"
|
||||
resource_groups: ["<rg-1>"]
|
||||
collection_interval: 60s
|
||||
processors:
|
||||
batch: {}
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
|
||||
tls:
|
||||
insecure: false
|
||||
headers:
|
||||
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
|
||||
service:
|
||||
pipelines:
|
||||
metrics/am:
|
||||
receivers: [azuremonitor]
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
logs:
|
||||
receivers: [otlp, azureeventhub]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
|
||||
```
|
||||
**NOTE:**
|
||||
Replace the `<Primary Connection String>` in the config file with the primary connection string for your Event Hub that you created in the previous section. It would look something like this:
|
||||
|
||||
```bash
|
||||
Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Azure Monitor Receiver Configuration
|
||||
|
||||
You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor.
|
||||
|
||||
1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal.
|
||||
You can name it `signoz-central-collector-app`. The redirect URI can be left empty.
|
||||
|
||||
2. To add read permissions to Azure Monitor, follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. The read access can be given to the full subscription.
|
||||
|
||||
3. There are multiple ways to authenticate the service principal, we will use the client secret option, follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`.
|
||||
|
||||
4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You would see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section.
|
||||
|
||||
5. To find `subscription_id`, follow steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate them in the configuration file.
|
||||
|
||||
**NOTE:**
|
||||
By following the above steps, you will get the values for `<Subscription ID>`, `<AD Tenant ID>`, `<Client ID>` and `<Client Secret>` which you need to fill in the `config.yaml` file.
|
||||
|
||||
|
||||
|
||||
## Run the Collector
|
||||
|
||||
With your configuration file ready, you can now start the Collector using the following command:
|
||||
|
||||
```bash
|
||||
# Runs in background with the configuration we just created
|
||||
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Open Ports
|
||||
|
||||
You will need to open the following ports on your Azure VM:
|
||||
- 4317 for gRPC
|
||||
- 4318 for HTTP
|
||||
|
||||
You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports.
|
||||
|
||||
|
||||
|
||||
### Validating the Deployment
|
||||
|
||||
Once the Collector is running, ensure that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors.
|
||||
|
||||
|
||||
|
||||
## Configure DNS label For Collector
|
||||
|
||||
You can add a DNS label to the Public IP address of the collector. This will make it easier to refer to the centralized collector from other services. You can do this by following these steps:
|
||||
|
||||
1. Go to the Public IP address of the collector. This would be the IP address of the VM or Load Balancer in case of Kubernetes or Load Balanced collector.
|
||||
2. Click on the "Configuration" tab.
|
||||
3. Enter the DNS label you want to use for the collector.
|
||||
4. Click on "Save".
|
||||
|
||||
**NOTE:** Please take note of the DNS label you have entered. You will need this in the next steps.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting)
|
||||
@@ -0,0 +1,31 @@
|
||||
## Prerequisite
|
||||
|
||||
- Azure subscription and Database instance running
|
||||
- Central Collector Setup
|
||||
- [SQL monitoring profile](https://learn.microsoft.com/en-us/azure/azure-sql/database/sql-insights-enable?view=azuresql#create-sql-monitoring-profile) created to monitor the databases in Azure Monitor
|
||||
|
||||
|
||||
|
||||
|
||||
## Setup
|
||||
|
||||
Once you have completed the prerequisites, you can start monitoring your Database's system metrics with SigNoz. Here's how you can do it:
|
||||
|
||||
1. Log in to your SigNoz account.
|
||||
2. Navigate to the Dashboards section, and [add a dashboard](https://signoz.io/docs/userguide/manage-dashboards/)
|
||||
3. Add a Timeseries Panel
|
||||
4. In **Metrics**, select `azure_storage_maximum` and **Avg By** select tag `location`
|
||||
5. In Filter say `name = <database-name>`
|
||||
6. Hit “Save Changes”. You now have Memory Usage of your Database in a Dashboard for reporting and alerting
|
||||
|
||||
That's it! You have successfully set up monitoring for your Database's system metrics with SigNoz.
|
||||
|
||||
|
||||
|
||||
**NOTE:**
|
||||
Make sure you have created a sql monitoring profile in Azure Monitor if not, follow this guide to [Create SQL Monitoring Profile](https://learn.microsoft.com/en-us/azure/azure-sql/database/sql-insights-enable?view=azuresql#create-sql-monitoring-profile).
|
||||
You can monitor multiple databases in a single profile.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/db-metrics/#troubleshooting)
|
||||
@@ -0,0 +1,54 @@
|
||||
## Overview
|
||||
|
||||
Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises.
|
||||
|
||||
Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- An active Azure subscription
|
||||
|
||||
## Setup
|
||||
|
||||
### 1. Create an Event Hubs Namespace
|
||||
|
||||
1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace.
|
||||
2. Fill in the required details:
|
||||
- **Resource group**: Choose or create a new one.
|
||||
- **Namespace name**: Enter a unique name, e.g., `<orgName>-obs-signoz`.
|
||||
- **Pricing tier**: Based on your logging requirements.
|
||||
- **Region**: Should match the region of the resources you want to monitor.
|
||||
- **Throughput units**: Choose based on logging needs.
|
||||
3. Click "Review + create" and then "Create".
|
||||
|
||||
### 2. Create an Event Hub
|
||||
|
||||
1. Navigate to the Event Hubs namespace you created in the Azure portal.
|
||||
2. Click "+ Event Hub" to create a new event hub.
|
||||
3. Enter a name, e.g., `logs`, and click "Create"
|
||||
|
||||
### 3. Create a SAS Policy and Copy Connection String
|
||||
|
||||
1. Navigate to the Event Hub in the Azure portal.
|
||||
2. Click "Shared access policies" in the left menu.
|
||||
3. Click "Add" to create a new policy named `signozListen`.
|
||||
4. Select the "Listen" permission and set the expiration time.
|
||||
5. Click "Save".
|
||||
6. Copy the *Connection string–primary key*.
|
||||
|
||||
<!-- ### 4. Configure OpenTelemetry Integration
|
||||
|
||||
1. Add a new receiver to [Central Collector Setup](../collector-setup).
|
||||
2. Configure the receiver with the Event Hubs namespace connection string and the event hub name.
|
||||
|
||||
### 5. Stream Logs to Event Hubs
|
||||
|
||||
1. Configure Azure services' diagnostic settings to forward logs to the Event Hub.
|
||||
2. Ensure logs are in [Azure Common Log Format](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema).
|
||||
3. Verify logs are streaming to Event Hubs and received by SigNoz.
|
||||
|
||||
For detailed instructions, refer to the Azure documentation: [Azure Event Hub](https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-create). -->
|
||||
|
||||
<!-- For more configuration options, see the [OpenTelemetry Documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/azureeventhubreceiver). -->
|
||||
|
||||
|
||||
@@ -0,0 +1,134 @@
|
||||
## Prerequisites
|
||||
|
||||
- An Azure subscription with Azure VM and SSH access enabled
|
||||
- Central Collector Setup
|
||||
|
||||
|
||||
### Connect to the VM
|
||||
The [SSH Keys Guide](https://learn.microsoft.com/en-us/azure/virtual-machines/ssh-keys-portal#connect-to-the-vm) has steps on how to connect to your VM via SSH.
|
||||
|
||||
|
||||
|
||||
### Install OpenTelemetry Collector
|
||||
|
||||
Follow the [OpenTelemetry SigNoz documentation](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) to install the OpenTelemetry Collector.
|
||||
|
||||
|
||||
|
||||
### Configure Collector
|
||||
|
||||
We send the logs, traces and metrics to the central collector that we set up in the previous step instead of sending them to SigNoz directly, in order to adopt a scalable architecture pattern. We recommend that you use the same pattern in your Azure subscription.
|
||||
|
||||
Replace the content of the `config.yaml` file that you created while installing the collector.
|
||||
|
||||
```yaml
|
||||
receivers:
|
||||
filelog:
|
||||
include: [ <file paths> ] # /var/log/myservice/*.json
|
||||
operators:
|
||||
- type: json_parser
|
||||
timestamp:
|
||||
parse_from: attributes.time
|
||||
layout: '%Y-%m-%d %H:%M:%S'
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
hostmetrics:
|
||||
collection_interval: 60s
|
||||
scrapers:
|
||||
cpu: {}
|
||||
disk: {}
|
||||
load: {}
|
||||
filesystem: {}
|
||||
memory: {}
|
||||
network: {}
|
||||
paging: {}
|
||||
process:
|
||||
mute_process_name_error: true
|
||||
mute_process_exe_error: true
|
||||
mute_process_io_error: true
|
||||
processes: {}
|
||||
prometheus:
|
||||
config:
|
||||
global:
|
||||
scrape_interval: 60s
|
||||
scrape_configs:
|
||||
- job_name: otel-collector-binary
|
||||
static_configs:
|
||||
- targets:
|
||||
# - localhost:8888
|
||||
processors:
|
||||
batch:
|
||||
send_batch_size: 1000
|
||||
timeout: 10s
|
||||
# Ref: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/processor/resourcedetectionprocessor/README.md
|
||||
resourcedetection:
|
||||
detectors: [env, azure, system]
|
||||
# Using OTEL_RESOURCE_ATTRIBUTES envvar, env detector adds custom labels.
|
||||
timeout: 2s
|
||||
system:
|
||||
hostname_sources: [dns, os]
|
||||
extensions:
|
||||
health_check: {}
|
||||
zpages: {}
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "<Central Collector DNS Name>:4318"
|
||||
logging:
|
||||
verbosity: normal
|
||||
service:
|
||||
telemetry:
|
||||
metrics:
|
||||
address: 0.0.0.0:8888
|
||||
extensions: [health_check, zpages]
|
||||
pipelines:
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
metrics/internal:
|
||||
receivers: [prometheus, hostmetrics]
|
||||
processors: [resourcedetection, batch]
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
logs:
|
||||
receivers: [otlp, filelog]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
```
|
||||
|
||||
|
||||
#### OTLP Exporter Configuration
|
||||
Make sure to replace `<Central Collector DNS Name>` with the DNS name of your central collector that you set up earlier.
|
||||
|
||||
|
||||
|
||||
#### File Logs Receiver Configuration
|
||||
The file logs receiver needs to be configured with the paths to the log files that you want to stream to SigNoz. You can specify multiple paths by listing them in an array.
|
||||
|
||||
You can also specify globbed path patterns to match multiple log files. For example, `/var/log/myservice/*.json` will match all log files in the `/var/log/myservice` directory with a `.json` extension.
|
||||
|
||||
|
||||
|
||||
### Start the OpenTelemetry Collector
|
||||
|
||||
Once we are done with the above configurations, we can now run the collector service with the following command:
|
||||
|
||||
```bash
|
||||
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Hostmetrics Dashboard
|
||||
|
||||
Once the collector is running, you can access the SigNoz dashboard to view the logs and metrics from your Azure VM.
|
||||
|
||||
Please refer to the [Hostmetrics Dashboard](https://signoz.io/docs/userguide/hostmetrics/) for information on how to import and use the dashboard.
|
||||
|
||||
@@ -0,0 +1,129 @@
|
||||
Set up the OpenTelemetry Collector on a Virtual Machine (VM). The setup is compatible with cloud VM instances, your own data center, or even a local VM on your development machine. Here's how to do it:
|
||||
|
||||
|
||||
## Download and Install the OpenTelemetry Collector Binary
|
||||
|
||||
Please visit [Documentation For VM](https://signoz.io/docs/tutorial/opentelemetry-binary-usage-in-virtual-machine/) which provides further guidance on a VM installation.
|
||||
|
||||
|
||||
|
||||
## Configure OpenTelemetry Collector
|
||||
|
||||
While following the documentation above for installing the OpenTelemetry Collector Binary, you must have created `config.yaml` file. Replace the content of the `config.yaml` with the below config file which includes the **Azure Monitor receiver**.
|
||||
|
||||
```yaml
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: 0.0.0.0:4317
|
||||
http:
|
||||
endpoint: 0.0.0.0:4318
|
||||
azureeventhub:
|
||||
connection: <Primary Connection String>
|
||||
format: "azure"
|
||||
azuremonitor:
|
||||
subscription_id: "<Subscription ID>"
|
||||
tenant_id: "<AD Tenant ID>"
|
||||
client_id: "<Client ID>"
|
||||
client_secret: "<Client Secret>"
|
||||
resource_groups: ["<rg-1>"]
|
||||
collection_interval: 60s
|
||||
processors:
|
||||
batch: {}
|
||||
exporters:
|
||||
otlp:
|
||||
endpoint: "ingest.{{REGION}}.signoz.cloud:443"
|
||||
tls:
|
||||
insecure: false
|
||||
headers:
|
||||
"signoz-access-token": "{{SIGNOZ_INGESTION_KEY}}"
|
||||
service:
|
||||
pipelines:
|
||||
metrics/am:
|
||||
receivers: [azuremonitor]
|
||||
exporters: [otlp]
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
logs:
|
||||
receivers: [otlp, azureeventhub]
|
||||
processors: [batch]
|
||||
exporters: [otlp]
|
||||
|
||||
```
|
||||
**NOTE:**
|
||||
Replace the `<Primary Connection String>` in the config file with the primary connection string for your Event Hub that you created in the previous section. It would look something like this:
|
||||
|
||||
```bash
|
||||
Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=;EntityPath=hubName
|
||||
```
|
||||
|
||||
|
||||
|
||||
## Azure Monitor Receiver Configuration
|
||||
|
||||
You will need to set up a [service principal](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal) with Read permissions to receive data from Azure Monitor.
|
||||
|
||||
1. Follow the steps in the [Create a service principal Azure Doc](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#register-an-application-with-microsoft-entra-id-and-create-a-service-principal) documentation to create a service principal.
|
||||
You can name it `signoz-central-collector-app`. The redirect URI can be left empty.
|
||||
|
||||
2. To add read permissions to Azure Monitor, follow the [Assign Role](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#assign-a-role-to-the-application) documentation. The read access can be given to the full subscription.
|
||||
|
||||
3. There are multiple ways to authenticate the service principal, we will use the client secret option, follow [Creating a client secret](https://learn.microsoft.com/en-us/entra/identity-platform/howto-create-service-principal-portal#option-3-create-a-new-client-secret) and don't forget to copy the client secret. The secret is used in the configuration file as `client_secret`.
|
||||
|
||||
4. To find `client_id` and `tenant_id`, go to the [Azure Portal](https://portal.azure.com/) and search for the `Application` you created. You would see the `Application (client) ID` and `Directory (tenant) ID` in the Overview section.
|
||||
|
||||
5. To find `subscription_id`, follow steps in [Find Your Subscription](https://learn.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription) and populate them in the configuration file.
|
||||
|
||||
**NOTE:**
|
||||
By following the above steps, you will get the values for `<Subscription ID>`, `<AD Tenant ID>`, `<Client ID>` and `<Client Secret>` which you need to fill in the `config.yaml` file.
|
||||
|
||||
|
||||
|
||||
## Run the Collector
|
||||
|
||||
With your configuration file ready, you can now start the Collector using the following command:
|
||||
|
||||
```bash
|
||||
# Runs in background with the configuration we just created
|
||||
./otelcol-contrib --config ./config.yaml &> otelcol-output.log & echo "$!" > otel-pid
|
||||
```
|
||||
|
||||
|
||||
|
||||
### Open Ports
|
||||
|
||||
You will need to open the following ports on your Azure VM:
|
||||
- 4317 for gRPC
|
||||
- 4318 for HTTP
|
||||
|
||||
You can do this by navigating to the Azure VM's Networking section and adding a new inbound rule for the ports.
|
||||
|
||||
|
||||
|
||||
### Validating the Deployment
|
||||
|
||||
Once the Collector is running, ensure that telemetry data is being successfully sent and received. Use the logging exporter as defined in your configuration file, or check the logs for any startup errors.
|
||||
|
||||
|
||||
|
||||
## Configure DNS label For Collector
|
||||
|
||||
You can add a DNS label to the Public IP address of the collector. This will make it easier to refer to the centralized collector from other services. You can do this by following these steps:
|
||||
|
||||
1. Go to the Public IP address of the collector. This would be the IP address of the VM or Load Balancer in case of Kubernetes or Load Balanced collector.
|
||||
2. Click on the "Configuration" tab.
|
||||
3. Enter the DNS label you want to use for the collector.
|
||||
4. Click on "Save".
|
||||
|
||||
**NOTE:** Please take note of the DNS label you have entered. You will need this in the next steps.
|
||||
|
||||
|
||||
|
||||
If you encounter any difficulties, please refer to this [troubleshooting section](https://signoz.io/docs/azure-monitoring/bootstrapping/collector-setup/#troubleshooting)
|
||||
@@ -0,0 +1,54 @@
|
||||
## Overview
|
||||
|
||||
Azure Event Hubs is a big data streaming platform ideal for centralizing logging and real-time log streaming for applications on Azure or on-premises.
|
||||
|
||||
Integrate SigNoz with Azure Event Hubs for a robust log management solution, leveraging SigNoz's log aggregation, querying, visualization, and alerting features.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- An active Azure subscription
|
||||
|
||||
## Setup
|
||||
|
||||
### 1. Create an Event Hubs Namespace
|
||||
|
||||
1. In the [Azure portal](https://portal.azure.com), create an Event Hubs namespace.
|
||||
2. Fill in the required details:
|
||||
- **Resource group**: Choose or create a new one.
|
||||
- **Namespace name**: Enter a unique name, e.g., `<orgName>-obs-signoz`.
|
||||
- **Pricing tier**: Based on your logging requirements.
|
||||
- **Region**: Should match the region of the resources you want to monitor.
|
||||
- **Throughput units**: Choose based on logging needs.
|
||||
3. Click "Review + create" and then "Create".
|
||||
|
||||
### 2. Create an Event Hub
|
||||
|
||||
1. Navigate to the Event Hubs namespace you created in the Azure portal.
|
||||
2. Click "+ Event Hub" to create a new event hub.
|
||||
3. Enter a name, e.g., `logs`and click "Create"
|
||||
|
||||
### 3. Create a SAS Policy and Copy Connection String
|
||||
|
||||
1. Navigate to the Event Hub in the Azure portal.
|
||||
2. Click "Shared access policies" in the left menu.
|
||||
3. Click "Add" to create a new policy named `signozListen`.
|
||||
4. Select the "Listen" permission and set the expiration time.
|
||||
5. Click "Save".
|
||||
6. Copy the *Connection string–primary key*.
|
||||
|
||||
<!-- ### 4. Configure OpenTelemetry Integration
|
||||
|
||||
1. Add a new receiver to [Central Collector Setup](../collector-setup).
|
||||
2. Configure the receiver with the Event Hubs namespace connection string and the event hub name.
|
||||
|
||||
### 5. Stream Logs to Event Hubs
|
||||
|
||||
1. Configure Azure services' diagnostic settings to forward logs to the Event Hub.
|
||||
2. Ensure logs are in [Azure Common Log Format](https://learn.microsoft.com/en-us/azure/azure-monitor/essentials/resource-logs-schema).
|
||||
3. Verify logs are streaming to Event Hubs and received by SigNoz.
|
||||
|
||||
For detailed instructions, refer to the Azure documentation: [Azure Event Hub](https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-create). -->
|
||||
|
||||
<!-- For more configuration options, see the [OpenTelemetry Documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/azureeventhubreceiver). -->
|
||||
|
||||
|
||||
@@ -31,7 +31,7 @@
|
||||
|
||||
.onboardingHeader {
|
||||
text-align: center;
|
||||
margin-top: 48px;
|
||||
margin-top: 24px;
|
||||
}
|
||||
|
||||
.onboardingHeader h1 {
|
||||
@@ -51,13 +51,14 @@
|
||||
justify-content: center;
|
||||
gap: 36px;
|
||||
margin: 36px;
|
||||
flex-wrap: wrap;
|
||||
}
|
||||
|
||||
.moduleStyles {
|
||||
padding: 0;
|
||||
box-sizing: border-box;
|
||||
cursor: pointer;
|
||||
width: 400px;
|
||||
width: 300px;
|
||||
transition: 0.3s;
|
||||
|
||||
.ant-card-body {
|
||||
|
||||
@@ -25,6 +25,7 @@ import { DataSourceType } from './Steps/DataSource/DataSource';
|
||||
import {
|
||||
defaultApplicationDataSource,
|
||||
defaultAwsServices,
|
||||
defaultAzureServices,
|
||||
defaultInfraMetricsType,
|
||||
defaultLogsType,
|
||||
moduleRouteMap,
|
||||
@@ -32,6 +33,7 @@ import {
|
||||
import {
|
||||
APM_STEPS,
|
||||
AWS_MONITORING_STEPS,
|
||||
AZURE_MONITORING_STEPS,
|
||||
getSteps,
|
||||
INFRASTRUCTURE_MONITORING_STEPS,
|
||||
LOGS_MANAGEMENT_STEPS,
|
||||
@@ -42,6 +44,7 @@ export enum ModulesMap {
|
||||
LogsManagement = 'LogsManagement',
|
||||
InfrastructureMonitoring = 'InfrastructureMonitoring',
|
||||
AwsMonitoring = 'AwsMonitoring',
|
||||
AzureMonitoring = 'AzureMonitoring',
|
||||
}
|
||||
|
||||
export interface ModuleProps {
|
||||
@@ -81,6 +84,12 @@ export const useCases = {
|
||||
desc:
|
||||
'Monitor your traces, logs and metrics for AWS services like EC2, ECS, EKS etc.',
|
||||
},
|
||||
AzureMonitoring: {
|
||||
id: ModulesMap.AzureMonitoring,
|
||||
title: 'Azure Monitoring',
|
||||
desc:
|
||||
'Monitor your traces, logs and metrics for Azure services like AKS, Container Apps, App Service etc.',
|
||||
},
|
||||
};
|
||||
|
||||
export default function Onboarding(): JSX.Element {
|
||||
@@ -172,6 +181,7 @@ export default function Onboarding(): JSX.Element {
|
||||
setSelectedModuleSteps(APM_STEPS);
|
||||
};
|
||||
|
||||
// eslint-disable-next-line sonarjs/cognitive-complexity
|
||||
useEffect(() => {
|
||||
if (selectedModule?.id === ModulesMap.InfrastructureMonitoring) {
|
||||
if (selectedDataSource) {
|
||||
@@ -194,6 +204,13 @@ export default function Onboarding(): JSX.Element {
|
||||
setSelectedModuleSteps(AWS_MONITORING_STEPS);
|
||||
updateSelectedDataSource(defaultAwsServices);
|
||||
}
|
||||
} else if (selectedModule?.id === ModulesMap.AzureMonitoring) {
|
||||
if (selectedDataSource) {
|
||||
setModuleStepsBasedOnSelectedDataSource(selectedDataSource);
|
||||
} else {
|
||||
setSelectedModuleSteps(AZURE_MONITORING_STEPS);
|
||||
updateSelectedDataSource(defaultAzureServices);
|
||||
}
|
||||
} else if (selectedModule?.id === ModulesMap.APM) {
|
||||
handleAPMSteps();
|
||||
|
||||
@@ -240,18 +257,24 @@ export default function Onboarding(): JSX.Element {
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
if (location.pathname === ROUTES.GET_STARTED_APPLICATION_MONITORING) {
|
||||
const { pathname } = location;
|
||||
|
||||
if (pathname === ROUTES.GET_STARTED_APPLICATION_MONITORING) {
|
||||
handleModuleSelect(useCases.APM);
|
||||
updateSelectedDataSource(defaultApplicationDataSource);
|
||||
handleNextStep();
|
||||
} else if (
|
||||
location.pathname === ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING
|
||||
) {
|
||||
} else if (pathname === ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING) {
|
||||
handleModuleSelect(useCases.InfrastructureMonitoring);
|
||||
handleNextStep();
|
||||
} else if (location.pathname === ROUTES.GET_STARTED_LOGS_MANAGEMENT) {
|
||||
} else if (pathname === ROUTES.GET_STARTED_LOGS_MANAGEMENT) {
|
||||
handleModuleSelect(useCases.LogsManagement);
|
||||
handleNextStep();
|
||||
} else if (pathname === ROUTES.GET_STARTED_AWS_MONITORING) {
|
||||
handleModuleSelect(useCases.AwsMonitoring);
|
||||
handleNextStep();
|
||||
} else if (pathname === ROUTES.GET_STARTED_AZURE_MONITORING) {
|
||||
handleModuleSelect(useCases.AzureMonitoring);
|
||||
handleNextStep();
|
||||
}
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, []);
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
import { MarkdownRenderer } from 'components/MarkdownRenderer/MarkdownRenderer';
|
||||
import { ApmDocFilePaths } from 'container/OnboardingContainer/constants/apmDocFilePaths';
|
||||
import { AwsMonitoringDocFilePaths } from 'container/OnboardingContainer/constants/awsMonitoringDocFilePaths';
|
||||
import { AzureMonitoringDocFilePaths } from 'container/OnboardingContainer/constants/azureMonitoringDocFilePaths';
|
||||
import { InfraMonitoringDocFilePaths } from 'container/OnboardingContainer/constants/infraMonitoringDocFilePaths';
|
||||
import { LogsManagementDocFilePaths } from 'container/OnboardingContainer/constants/logsManagementDocFilePaths';
|
||||
import {
|
||||
@@ -69,6 +70,8 @@ export default function MarkdownStep(): JSX.Element {
|
||||
docFilePaths = InfraMonitoringDocFilePaths;
|
||||
} else if (selectedModule?.id === ModulesMap.AwsMonitoring) {
|
||||
docFilePaths = AwsMonitoringDocFilePaths;
|
||||
} else if (selectedModule?.id === ModulesMap.AzureMonitoring) {
|
||||
docFilePaths = AzureMonitoringDocFilePaths;
|
||||
}
|
||||
// @ts-ignore
|
||||
if (docFilePaths && docFilePaths[path]) {
|
||||
|
||||
@@ -0,0 +1,82 @@
|
||||
import AzureMonitoring_azureAks_setupCentralCollector from '../Modules/AzureMonitoring/AKS/aks-installCentralCollector.md';
|
||||
import AzureMonitoring_azureAks_sendLogs from '../Modules/AzureMonitoring/AKS/aks-logs.md';
|
||||
import AzureMonitoring_azureAks_sendMetrics from '../Modules/AzureMonitoring/AKS/aks-metrics.md';
|
||||
import AzureMonitoring_azureAks_setupAzureEventsHub from '../Modules/AzureMonitoring/AKS/aks-setupEventsHub.md';
|
||||
import AzureMonitoring_azureAks_sendTraces from '../Modules/AzureMonitoring/AKS/aks-tracing.md';
|
||||
// Azure App Service
|
||||
import AzureMonitoring_azureAppService_setupCentralCollector from '../Modules/AzureMonitoring/AppService/appService-installCentralCollector.md';
|
||||
import AzureMonitoring_azureAppService_sendLogs from '../Modules/AzureMonitoring/AppService/appService-logs.md';
|
||||
import AzureMonitoring_azureAppService_sendMetrics from '../Modules/AzureMonitoring/AppService/appService-metrics.md';
|
||||
import AzureMonitoring_azureAppService_setupAzureEventsHub from '../Modules/AzureMonitoring/AppService/appService-setupEventsHub.md';
|
||||
import AzureMonitoring_azureAppService_sendTraces from '../Modules/AzureMonitoring/AppService/appService-tracing.md';
|
||||
// Azure Blob Storage
|
||||
import AzureMonitoring_azureBlobStorage_setupCentralCollector from '../Modules/AzureMonitoring/BlobStorage/blobStorage-installCentralCollector.md';
|
||||
import AzureMonitoring_azureBlobStorage_sendLogs from '../Modules/AzureMonitoring/BlobStorage/blobStorage-logs.md';
|
||||
import AzureMonitoring_azureBlobStorage_sendMetrics from '../Modules/AzureMonitoring/BlobStorage/blobStorage-metrics.md';
|
||||
import AzureMonitoring_azureBlobStorage_setupAzureEventsHub from '../Modules/AzureMonitoring/BlobStorage/blobStorage-setupEventsHub.md';
|
||||
// Azure Container Apps
|
||||
import AzureMonitoring_azureContainerApps_setupCentralCollector from '../Modules/AzureMonitoring/ContainerApps/containerApps-installCentralCollector.md';
|
||||
import AzureMonitoring_azureContainerApps_sendLogs from '../Modules/AzureMonitoring/ContainerApps/containerApps-logs.md';
|
||||
import AzureMonitoring_azureContainerApps_sendMetrics from '../Modules/AzureMonitoring/ContainerApps/containerApps-metrics.md';
|
||||
import AzureMonitoring_azureContainerApps_setupAzureEventsHub from '../Modules/AzureMonitoring/ContainerApps/containerApps-setupEventsHub.md';
|
||||
import AzureMonitoring_azureContainerApps_sendTraces from '../Modules/AzureMonitoring/ContainerApps/containerApps-tracing.md';
|
||||
// Azure Functions
|
||||
import AzureMonitoring_azureFunctions_setupCentralCollector from '../Modules/AzureMonitoring/Functions/functions-installCentralCollector.md';
|
||||
import AzureMonitoring_azureFunctions_sendLogs from '../Modules/AzureMonitoring/Functions/functions-logs.md';
|
||||
import AzureMonitoring_azureFunctions_sendMetrics from '../Modules/AzureMonitoring/Functions/functions-metrics.md';
|
||||
import AzureMonitoring_azureFunctions_setupAzureEventsHub from '../Modules/AzureMonitoring/Functions/functions-setupEventsHub.md';
|
||||
import AzureMonitoring_azureFunctions_sendTraces from '../Modules/AzureMonitoring/Functions/functions-tracing.md';
|
||||
// Azure SQL Database Metrics
|
||||
import AzureMonitoring_azureSQLDatabaseMetrics_setupCentralCollector from '../Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-installCentralCollector.md';
|
||||
import AzureMonitoring_azureSQLDatabaseMetrics_sendMetrics from '../Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-metrics.md';
|
||||
import AzureMonitoring_azureSQLDatabaseMetrics_setupAzureEventsHub from '../Modules/AzureMonitoring/SqlDatabaseMetrics/sqlDatabaseMetrics-setupEventsHub.md';
|
||||
import AzureMonitoring_azureVm_sendHostmetricsLogs from '../Modules/AzureMonitoring/Vm/vm-hostmetrics-and-logs.md';
|
||||
// Azure VM
|
||||
import AzureMonitoring_azureVm_setupCentralCollector from '../Modules/AzureMonitoring/Vm/vm-installCentralCollector.md';
|
||||
import AzureMonitoring_azureVm_setupAzureEventsHub from '../Modules/AzureMonitoring/Vm/vm-setupEventsHub.md';
|
||||
|
||||
export const AzureMonitoringDocFilePaths = {
|
||||
// Azure AKS
|
||||
AzureMonitoring_azureAks_setupCentralCollector,
|
||||
AzureMonitoring_azureAks_setupAzureEventsHub,
|
||||
AzureMonitoring_azureAks_sendTraces,
|
||||
AzureMonitoring_azureAks_sendLogs,
|
||||
AzureMonitoring_azureAks_sendMetrics,
|
||||
|
||||
// Azure App Service
|
||||
AzureMonitoring_azureFunctions_setupCentralCollector,
|
||||
AzureMonitoring_azureFunctions_setupAzureEventsHub,
|
||||
AzureMonitoring_azureFunctions_sendTraces,
|
||||
AzureMonitoring_azureFunctions_sendLogs,
|
||||
AzureMonitoring_azureFunctions_sendMetrics,
|
||||
|
||||
// Azure Functions
|
||||
AzureMonitoring_azureAppService_setupCentralCollector,
|
||||
AzureMonitoring_azureAppService_setupAzureEventsHub,
|
||||
AzureMonitoring_azureAppService_sendTraces,
|
||||
AzureMonitoring_azureAppService_sendLogs,
|
||||
AzureMonitoring_azureAppService_sendMetrics,
|
||||
|
||||
// Azure Container Apps
|
||||
AzureMonitoring_azureContainerApps_setupCentralCollector,
|
||||
AzureMonitoring_azureContainerApps_setupAzureEventsHub,
|
||||
AzureMonitoring_azureContainerApps_sendTraces,
|
||||
AzureMonitoring_azureContainerApps_sendLogs,
|
||||
AzureMonitoring_azureContainerApps_sendMetrics,
|
||||
|
||||
// Azure VM
|
||||
AzureMonitoring_azureVm_setupCentralCollector,
|
||||
AzureMonitoring_azureVm_setupAzureEventsHub,
|
||||
AzureMonitoring_azureVm_sendHostmetricsLogs,
|
||||
|
||||
// Azure SQL Database Metrics
|
||||
AzureMonitoring_azureSQLDatabaseMetrics_setupCentralCollector,
|
||||
AzureMonitoring_azureSQLDatabaseMetrics_setupAzureEventsHub,
|
||||
AzureMonitoring_azureSQLDatabaseMetrics_sendMetrics,
|
||||
|
||||
// Azure Blob Storage
|
||||
AzureMonitoring_azureBlobStorage_setupCentralCollector,
|
||||
AzureMonitoring_azureBlobStorage_setupAzureEventsHub,
|
||||
AzureMonitoring_azureBlobStorage_sendLogs,
|
||||
AzureMonitoring_azureBlobStorage_sendMetrics,
|
||||
};
|
||||
@@ -35,6 +35,12 @@ export const stepsMap = {
|
||||
deployTaskDefinition: `deployTaskDefinition`,
|
||||
ecsSendLogsData: `ecsSendLogsData`,
|
||||
monitorDashboard: `monitorDashboard`,
|
||||
setupCentralCollector: `setupCentralCollector`,
|
||||
setupAzureEventsHub: `setupAzureEventsHub`,
|
||||
sendTraces: `sendTraces`,
|
||||
sendLogs: `sendLogs`,
|
||||
sendMetrics: `sendMetrics`,
|
||||
sendHostmetricsLogs: `sendHostmetricsLogs`,
|
||||
};
|
||||
|
||||
export const DataSourceStep: SelectedModuleStepProps = {
|
||||
@@ -201,3 +207,33 @@ export const MonitorDashboard: SelectedModuleStepProps = {
|
||||
title: 'Monitor using Dashboard ',
|
||||
component: <MarkdownStep />,
|
||||
};
|
||||
export const SetupCentralCollectorStep: SelectedModuleStepProps = {
|
||||
id: stepsMap.setupCentralCollector,
|
||||
title: 'Setup Central Collector ',
|
||||
component: <MarkdownStep />,
|
||||
};
|
||||
export const SetupAzureEventsHub: SelectedModuleStepProps = {
|
||||
id: stepsMap.setupAzureEventsHub,
|
||||
title: 'Setup EventsHub',
|
||||
component: <MarkdownStep />,
|
||||
};
|
||||
export const SendTraces: SelectedModuleStepProps = {
|
||||
id: stepsMap.sendTraces,
|
||||
title: 'Send Traces',
|
||||
component: <MarkdownStep />,
|
||||
};
|
||||
export const SendLogs: SelectedModuleStepProps = {
|
||||
id: stepsMap.sendLogs,
|
||||
title: 'Send Logs',
|
||||
component: <MarkdownStep />,
|
||||
};
|
||||
export const SendMetrics: SelectedModuleStepProps = {
|
||||
id: stepsMap.sendMetrics,
|
||||
title: 'Send Metrics',
|
||||
component: <MarkdownStep />,
|
||||
};
|
||||
export const SendHostmetricsLogs: SelectedModuleStepProps = {
|
||||
id: stepsMap.sendHostmetricsLogs,
|
||||
title: 'HostMetrics and Logging',
|
||||
component: <MarkdownStep />,
|
||||
};
|
||||
|
||||
@@ -8,6 +8,7 @@ export enum ModulesMap {
|
||||
LogsManagement = 'LogsManagement',
|
||||
InfrastructureMonitoring = 'InfrastructureMonitoring',
|
||||
AwsMonitoring = 'AwsMonitoring',
|
||||
AzureMonitoring = 'AzureMonitoring',
|
||||
}
|
||||
|
||||
export const frameworksMap = {
|
||||
@@ -82,6 +83,7 @@ export const frameworksMap = {
|
||||
LogsManagement: {},
|
||||
InfrastructureMonitoring: {},
|
||||
AwsMonitoring: {},
|
||||
AzureMonitoring: {},
|
||||
};
|
||||
|
||||
export const defaultApplicationDataSource = {
|
||||
@@ -270,6 +272,50 @@ const supportedAwsServices = [
|
||||
},
|
||||
];
|
||||
|
||||
export const defaultAzureServices = {
|
||||
name: 'VM',
|
||||
id: 'azureVm',
|
||||
imgURL: `/Logos/azure-vm.svg`,
|
||||
};
|
||||
|
||||
const supportedAzureServices = [
|
||||
{
|
||||
name: 'VM',
|
||||
id: 'azureVm',
|
||||
imgURL: `/Logos/azure-vm.svg`,
|
||||
},
|
||||
{
|
||||
name: 'App Service',
|
||||
id: 'azureAppService',
|
||||
imgURL: `/Logos/azure-app-service.svg`,
|
||||
},
|
||||
{
|
||||
name: 'AKS',
|
||||
id: 'azureAks',
|
||||
imgURL: `/Logos/azure-aks.svg`,
|
||||
},
|
||||
{
|
||||
name: 'Azure Functions',
|
||||
id: 'azureFunctions',
|
||||
imgURL: `/Logos/azure-functions.svg`,
|
||||
},
|
||||
{
|
||||
name: 'Azure Container Apps',
|
||||
id: 'azureContainerApps',
|
||||
imgURL: `/Logos/azure-container-apps.svg`,
|
||||
},
|
||||
{
|
||||
name: 'SQL Database Metrics',
|
||||
id: 'azureSQLDatabaseMetrics',
|
||||
imgURL: `/Logos/azure-sql-database-metrics.svg`,
|
||||
},
|
||||
{
|
||||
name: 'Azure Blob Storage',
|
||||
id: 'azureBlobStorage',
|
||||
imgURL: `/Logos/azure-blob-storage.svg`,
|
||||
},
|
||||
];
|
||||
|
||||
export const getDataSources = (module: ModuleProps): DataSourceType[] => {
|
||||
if (module.id === ModulesMap.APM) {
|
||||
return supportedLanguages;
|
||||
@@ -283,7 +329,11 @@ export const getDataSources = (module: ModuleProps): DataSourceType[] => {
|
||||
return supportedLogsTypes;
|
||||
}
|
||||
|
||||
return supportedAwsServices;
|
||||
if (module.id === ModulesMap.AwsMonitoring) {
|
||||
return supportedAwsServices;
|
||||
}
|
||||
|
||||
return supportedAzureServices;
|
||||
};
|
||||
|
||||
export const getSupportedFrameworks = ({
|
||||
@@ -347,4 +397,5 @@ export const moduleRouteMap = {
|
||||
[ModulesMap.InfrastructureMonitoring]:
|
||||
ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING,
|
||||
[ModulesMap.AwsMonitoring]: ROUTES.GET_STARTED_AWS_MONITORING,
|
||||
[ModulesMap.AzureMonitoring]: ROUTES.GET_STARTED_AZURE_MONITORING,
|
||||
};
|
||||
|
||||
@@ -22,7 +22,13 @@ import {
|
||||
RestartOtelCollector,
|
||||
RunApplicationStep,
|
||||
SelectMethodStep,
|
||||
SendHostmetricsLogs,
|
||||
SendLogs,
|
||||
SendLogsCloudwatch,
|
||||
SendMetrics,
|
||||
SendTraces,
|
||||
SetupAzureEventsHub,
|
||||
SetupCentralCollectorStep,
|
||||
SetupDaemonService,
|
||||
SetupLogDrains,
|
||||
SetupOtelCollectorStep,
|
||||
@@ -57,6 +63,10 @@ export const INFRASTRUCTURE_MONITORING_STEPS: SelectedModuleStepProps[] = [
|
||||
|
||||
export const AWS_MONITORING_STEPS: SelectedModuleStepProps[] = [DataSourceStep];
|
||||
|
||||
export const AZURE_MONITORING_STEPS: SelectedModuleStepProps[] = [
|
||||
DataSourceStep,
|
||||
];
|
||||
|
||||
export const getSteps = ({
|
||||
selectedDataSource,
|
||||
}: GetStepsProps): SelectedModuleStepProps[] => {
|
||||
@@ -144,6 +154,70 @@ export const getSteps = ({
|
||||
];
|
||||
case 'awsEks':
|
||||
return [DataSourceStep, SetupOtelCollectorStep, MonitorDashboard];
|
||||
case 'azureVm':
|
||||
return [
|
||||
DataSourceStep,
|
||||
SetupAzureEventsHub,
|
||||
SetupCentralCollectorStep,
|
||||
SendHostmetricsLogs,
|
||||
];
|
||||
// eslint-disable-next-line sonarjs/no-duplicated-branches
|
||||
case 'azureAks':
|
||||
return [
|
||||
DataSourceStep,
|
||||
SetupAzureEventsHub,
|
||||
SetupCentralCollectorStep,
|
||||
SendTraces,
|
||||
SendLogs,
|
||||
SendMetrics,
|
||||
];
|
||||
// eslint-disable-next-line sonarjs/no-duplicated-branches
|
||||
case 'azureAppService':
|
||||
return [
|
||||
DataSourceStep,
|
||||
SetupAzureEventsHub,
|
||||
SetupCentralCollectorStep,
|
||||
SendTraces,
|
||||
SendLogs,
|
||||
SendMetrics,
|
||||
];
|
||||
// eslint-disable-next-line sonarjs/no-duplicated-branches
|
||||
case 'azureFunctions':
|
||||
return [
|
||||
DataSourceStep,
|
||||
SetupAzureEventsHub,
|
||||
SetupCentralCollectorStep,
|
||||
SendTraces,
|
||||
SendLogs,
|
||||
SendMetrics,
|
||||
];
|
||||
// eslint-disable-next-line sonarjs/no-duplicated-branches
|
||||
case 'azureContainerApps':
|
||||
return [
|
||||
DataSourceStep,
|
||||
SetupAzureEventsHub,
|
||||
SetupCentralCollectorStep,
|
||||
SendTraces,
|
||||
SendLogs,
|
||||
SendMetrics,
|
||||
];
|
||||
// eslint-disable-next-line sonarjs/no-duplicated-branches
|
||||
case 'azureBlobStorage':
|
||||
return [
|
||||
DataSourceStep,
|
||||
SetupAzureEventsHub,
|
||||
SetupCentralCollectorStep,
|
||||
SendLogs,
|
||||
SendMetrics,
|
||||
];
|
||||
// eslint-disable-next-line sonarjs/no-duplicated-branches
|
||||
case 'azureSQLDatabaseMetrics':
|
||||
return [
|
||||
DataSourceStep,
|
||||
SetupAzureEventsHub,
|
||||
SetupCentralCollectorStep,
|
||||
SendMetrics,
|
||||
];
|
||||
|
||||
default:
|
||||
return [DataSourceStep];
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import * as Sentry from '@sentry/react';
|
||||
import { FeatureKeys } from 'constants/features';
|
||||
import useFeatureFlag from 'hooks/useFeatureFlag';
|
||||
import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
|
||||
import { ErrorBoundary } from 'react-error-boundary';
|
||||
|
||||
import ServiceMetrics from './ServiceMetrics';
|
||||
import ServiceTraces from './ServiceTraces';
|
||||
@@ -12,11 +12,11 @@ function Services(): JSX.Element {
|
||||
?.active;
|
||||
|
||||
return (
|
||||
<ErrorBoundary FallbackComponent={ErrorBoundaryFallback}>
|
||||
<Sentry.ErrorBoundary fallback={<ErrorBoundaryFallback />}>
|
||||
<Container style={{ marginTop: 0 }}>
|
||||
{isSpanMetricEnabled ? <ServiceMetrics /> : <ServiceTraces />}
|
||||
</Container>
|
||||
</ErrorBoundary>
|
||||
</Sentry.ErrorBoundary>
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -125,8 +125,8 @@ const menuItems: SidebarItem[] = [
|
||||
|
||||
/** Mapping of some newly added routes and their corresponding active sidebar menu key */
|
||||
export const NEW_ROUTES_MENU_ITEM_KEY_MAP: Record<string, string> = {
|
||||
[ROUTES.TRACES_EXPLORER]: ROUTES.TRACE,
|
||||
[ROUTES.TRACE_EXPLORER]: ROUTES.TRACE,
|
||||
[ROUTES.TRACE]: ROUTES.TRACES_EXPLORER,
|
||||
[ROUTES.TRACE_EXPLORER]: ROUTES.TRACES_EXPLORER,
|
||||
[ROUTES.LOGS_BASE]: ROUTES.LOGS_EXPLORER,
|
||||
};
|
||||
|
||||
|
||||
@@ -112,6 +112,7 @@ export const routesToSkip = [
|
||||
ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING,
|
||||
ROUTES.GET_STARTED_LOGS_MANAGEMENT,
|
||||
ROUTES.GET_STARTED_AWS_MONITORING,
|
||||
ROUTES.GET_STARTED_AZURE_MONITORING,
|
||||
ROUTES.VERSION,
|
||||
ROUTES.ALL_DASHBOARD,
|
||||
ROUTES.ORG_SETTINGS,
|
||||
|
||||
@@ -181,6 +181,7 @@ export const routesToSkip = [
|
||||
ROUTES.GET_STARTED_INFRASTRUCTURE_MONITORING,
|
||||
ROUTES.GET_STARTED_LOGS_MANAGEMENT,
|
||||
ROUTES.GET_STARTED_AWS_MONITORING,
|
||||
ROUTES.GET_STARTED_AZURE_MONITORING,
|
||||
ROUTES.VERSION,
|
||||
ROUTES.ALL_DASHBOARD,
|
||||
ROUTES.ORG_SETTINGS,
|
||||
|
||||
@@ -1,7 +1,17 @@
|
||||
.span-details-sider {
|
||||
padding-top: 16px;
|
||||
|
||||
::-webkit-scrollbar {
|
||||
width: 0.2em;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-track {
|
||||
box-shadow: inset 0 0 6px rgba(18, 18, 18, 0.3);
|
||||
}
|
||||
|
||||
&.dark {
|
||||
.ant-layout-sider-trigger {
|
||||
background-color: black !important;
|
||||
background-color: #0b0c0e !important;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -246,13 +246,14 @@ function TraceDetail({ response }: TraceDetailProps): JSX.Element {
|
||||
|
||||
<Sider
|
||||
className={cx('span-details-sider', isDarkMode ? 'dark' : 'light')}
|
||||
style={{ background: isDarkMode ? '#000' : '#fff' }}
|
||||
style={{ background: isDarkMode ? '#0b0c0e' : '#fff' }}
|
||||
theme={isDarkMode ? 'dark' : 'light'}
|
||||
collapsible
|
||||
collapsed={collapsed}
|
||||
reverseArrow
|
||||
width={300}
|
||||
collapsedWidth={40}
|
||||
defaultCollapsed
|
||||
onCollapse={(value): void => setCollapsed(value)}
|
||||
>
|
||||
{!collapsed && (
|
||||
|
||||
@@ -3,6 +3,7 @@ import { ColumnsType } from 'antd/es/table';
|
||||
import ROUTES from 'constants/routes';
|
||||
import { getMs } from 'container/Trace/Filters/Panel/PanelBody/Duration/util';
|
||||
import { formUrlParams } from 'container/TraceDetail/utils';
|
||||
import dayjs from 'dayjs';
|
||||
import { RowData } from 'lib/query/createTableColumnsFromQuery';
|
||||
import { ILog } from 'types/api/logs/log';
|
||||
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
@@ -30,6 +31,13 @@ export const getListColumns = (
|
||||
key: 'date',
|
||||
title: 'Timestamp',
|
||||
width: 145,
|
||||
render: (item): JSX.Element => {
|
||||
const date =
|
||||
typeof item === 'string'
|
||||
? dayjs(item).format('YYYY-MM-DD HH:mm:ss.SSS')
|
||||
: dayjs(item / 1e6).format('YYYY-MM-DD HH:mm:ss.SSS');
|
||||
return <Typography.Text>{date}</Typography.Text>;
|
||||
},
|
||||
},
|
||||
];
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ import { ColumnsType } from 'antd/es/table';
|
||||
import ROUTES from 'constants/routes';
|
||||
import { getMs } from 'container/Trace/Filters/Panel/PanelBody/Duration/util';
|
||||
import { DEFAULT_PER_PAGE_OPTIONS } from 'hooks/queryPagination';
|
||||
import { generatePath } from 'react-router-dom';
|
||||
import { generatePath, Link } from 'react-router-dom';
|
||||
import { ListItem } from 'types/api/widgets/getQuery';
|
||||
|
||||
export const PER_PAGE_OPTIONS: number[] = [10, ...DEFAULT_PER_PAGE_OPTIONS];
|
||||
@@ -38,14 +38,14 @@ export const columns: ColumnsType<ListItem['data']> = [
|
||||
dataIndex: 'traceID',
|
||||
key: 'traceID',
|
||||
render: (traceID: string): JSX.Element => (
|
||||
<Typography.Link
|
||||
href={generatePath(ROUTES.TRACE_DETAIL, {
|
||||
<Link
|
||||
to={generatePath(ROUTES.TRACE_DETAIL, {
|
||||
id: traceID,
|
||||
})}
|
||||
data-testid="trace-id"
|
||||
>
|
||||
{traceID}
|
||||
</Typography.Link>
|
||||
</Link>
|
||||
),
|
||||
},
|
||||
];
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
height: 100%;
|
||||
|
||||
.resize-table {
|
||||
height: calc(100% - 40px);
|
||||
height: calc(100% - 70px);
|
||||
overflow: scroll;
|
||||
overflow-x: hidden;
|
||||
|
||||
|
||||
@@ -1,43 +1,61 @@
|
||||
import { PANEL_TYPES } from 'constants/queryBuilder';
|
||||
import { convertKeysToColumnFields } from 'container/LogsExplorerList/utils';
|
||||
import { Dashboard } from 'types/api/dashboard/getAll';
|
||||
import { BaseAutocompleteData } from 'types/api/queryBuilder/queryAutocompleteResponse';
|
||||
import { Query } from 'types/api/queryBuilder/queryBuilderData';
|
||||
|
||||
const baseLogsSelectedColumns = {
|
||||
dataType: 'string',
|
||||
type: '',
|
||||
name: 'timestamp',
|
||||
};
|
||||
|
||||
export const addEmptyWidgetInDashboardJSONWithQuery = (
|
||||
dashboard: Dashboard,
|
||||
query: Query,
|
||||
widgetId: string,
|
||||
panelTypes?: PANEL_TYPES,
|
||||
): Dashboard => ({
|
||||
...dashboard,
|
||||
data: {
|
||||
...dashboard.data,
|
||||
layout: [
|
||||
{
|
||||
i: widgetId,
|
||||
w: 6,
|
||||
x: 0,
|
||||
h: 6,
|
||||
y: 0,
|
||||
},
|
||||
...(dashboard?.data?.layout || []),
|
||||
],
|
||||
widgets: [
|
||||
...(dashboard?.data?.widgets || []),
|
||||
{
|
||||
id: widgetId,
|
||||
query,
|
||||
description: '',
|
||||
isStacked: false,
|
||||
nullZeroValues: '',
|
||||
opacity: '',
|
||||
title: '',
|
||||
timePreferance: 'GLOBAL_TIME',
|
||||
panelTypes: panelTypes || PANEL_TYPES.TIME_SERIES,
|
||||
softMax: null,
|
||||
softMin: null,
|
||||
selectedLogFields: [],
|
||||
selectedTracesFields: [],
|
||||
},
|
||||
],
|
||||
},
|
||||
});
|
||||
panelType?: PANEL_TYPES,
|
||||
selectedColumns?: BaseAutocompleteData[] | null,
|
||||
): Dashboard => {
|
||||
const logsSelectedColumns = [
|
||||
baseLogsSelectedColumns,
|
||||
...convertKeysToColumnFields(selectedColumns || []),
|
||||
];
|
||||
|
||||
return {
|
||||
...dashboard,
|
||||
data: {
|
||||
...dashboard.data,
|
||||
layout: [
|
||||
{
|
||||
i: widgetId,
|
||||
w: 6,
|
||||
x: 0,
|
||||
h: 6,
|
||||
y: 0,
|
||||
},
|
||||
...(dashboard?.data?.layout || []),
|
||||
],
|
||||
widgets: [
|
||||
...(dashboard?.data?.widgets || []),
|
||||
{
|
||||
id: widgetId,
|
||||
query,
|
||||
description: '',
|
||||
isStacked: false,
|
||||
nullZeroValues: '',
|
||||
opacity: '',
|
||||
title: '',
|
||||
timePreferance: 'GLOBAL_TIME',
|
||||
panelTypes: panelType || PANEL_TYPES.TIME_SERIES,
|
||||
softMax: null,
|
||||
softMin: null,
|
||||
selectedLogFields:
|
||||
panelType === PANEL_TYPES.LIST ? logsSelectedColumns : [],
|
||||
selectedTracesFields:
|
||||
panelType === PANEL_TYPES.LIST ? selectedColumns || [] : [],
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
};
|
||||
|
||||
@@ -7,7 +7,6 @@ import { AxiosError } from 'axios';
|
||||
import { ThemeProvider } from 'hooks/useDarkMode';
|
||||
import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
|
||||
import { createRoot } from 'react-dom/client';
|
||||
import { ErrorBoundary } from 'react-error-boundary';
|
||||
import { HelmetProvider } from 'react-helmet-async';
|
||||
import { QueryClient, QueryClientProvider } from 'react-query';
|
||||
import { Provider } from 'react-redux';
|
||||
@@ -58,7 +57,7 @@ if (container) {
|
||||
const root = createRoot(container);
|
||||
|
||||
root.render(
|
||||
<ErrorBoundary FallbackComponent={ErrorBoundaryFallback}>
|
||||
<Sentry.ErrorBoundary fallback={<ErrorBoundaryFallback />}>
|
||||
<HelmetProvider>
|
||||
<ThemeProvider>
|
||||
<QueryClientProvider client={queryClient}>
|
||||
@@ -68,6 +67,6 @@ if (container) {
|
||||
</QueryClientProvider>
|
||||
</ThemeProvider>
|
||||
</HelmetProvider>
|
||||
</ErrorBoundary>,
|
||||
</Sentry.ErrorBoundary>,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -75,6 +75,7 @@ export interface GetQueryResultsProps {
|
||||
globalSelectedInterval: Time | TimeV2 | CustomTimeType;
|
||||
variables?: Record<string, unknown>;
|
||||
params?: Record<string, unknown>;
|
||||
fillGaps?: boolean;
|
||||
tableParams?: {
|
||||
pagination?: Pagination;
|
||||
selectColumns?: any;
|
||||
|
||||
@@ -20,6 +20,7 @@ export const prepareQueryRangePayload = ({
|
||||
tableParams,
|
||||
variables = {},
|
||||
params = {},
|
||||
fillGaps = false,
|
||||
}: GetQueryResultsProps): PrepareQueryRangePayload => {
|
||||
let legendMap: Record<string, string> = {};
|
||||
const { allowSelectedIntervalForStepGen, ...restParams } = params;
|
||||
@@ -27,6 +28,7 @@ export const prepareQueryRangePayload = ({
|
||||
const compositeQuery: QueryRangePayload['compositeQuery'] = {
|
||||
queryType: query.queryType,
|
||||
panelType: graphType,
|
||||
fillGaps,
|
||||
};
|
||||
|
||||
switch (query.queryType) {
|
||||
|
||||
@@ -365,6 +365,7 @@ const fillRestAggregationData = (
|
||||
queryTableData: QueryDataV3[],
|
||||
seria: SeriesItem,
|
||||
equalQueriesByLabels: string[],
|
||||
isEqualQuery: boolean,
|
||||
): void => {
|
||||
const nextQueryData =
|
||||
queryTableData.find((q) => q.queryName === column.field) || null;
|
||||
@@ -374,13 +375,13 @@ const fillRestAggregationData = (
|
||||
nextQueryData,
|
||||
);
|
||||
|
||||
const isEqual = isEqualQueriesByLabel(equalQueriesByLabels, column.field);
|
||||
if (targetSeria) {
|
||||
const isEqual = isEqualQueriesByLabel(equalQueriesByLabels, column.field);
|
||||
if (!isEqual) {
|
||||
// This line is crucial. It ensures that no additional rows are added to the table for similar labels across all formulas here is how this check is applied: signoz/frontend/src/lib/query/createTableColumnsFromQuery.ts line number 370
|
||||
equalQueriesByLabels.push(column.field);
|
||||
}
|
||||
} else {
|
||||
} else if (!isEqualQuery) {
|
||||
column.data.push('N/A');
|
||||
}
|
||||
};
|
||||
@@ -435,6 +436,7 @@ const fillDataFromSeries = (
|
||||
queryTableData,
|
||||
seria,
|
||||
equalQueriesByLabels,
|
||||
isEqualQuery,
|
||||
);
|
||||
|
||||
return;
|
||||
@@ -570,6 +572,29 @@ export const createTableColumnsFromQuery: CreateTableDataFromQuery = ({
|
||||
a.queryName < b.queryName ? -1 : 1,
|
||||
);
|
||||
|
||||
// the reason we need this is because the filling of values in rows doesn't account for mismatch enteries
|
||||
// in the response. Example : Series A -> [label1, label2] and Series B -> [label2,label1] this isn't accounted for
|
||||
sortedQueryTableData.forEach((q) => {
|
||||
q.series?.forEach((s) => {
|
||||
s.labelsArray?.sort((a, b) =>
|
||||
Object.keys(a)[0] < Object.keys(b)[0] ? -1 : 1,
|
||||
);
|
||||
});
|
||||
q.series?.sort((a, b) => {
|
||||
let labelA = '';
|
||||
let labelB = '';
|
||||
a.labelsArray.forEach((lab) => {
|
||||
labelA += Object.values(lab)[0];
|
||||
});
|
||||
|
||||
b.labelsArray.forEach((lab) => {
|
||||
labelB += Object.values(lab)[0];
|
||||
});
|
||||
|
||||
return labelA < labelB ? -1 : 1;
|
||||
});
|
||||
});
|
||||
|
||||
const dynamicColumns = getDynamicColumns(sortedQueryTableData, query);
|
||||
|
||||
const { filledDynamicColumns, rowsLength } = fillColumnsData(
|
||||
|
||||
@@ -68,7 +68,8 @@ function getStackedSeries(apiResponse: QueryData[]): QueryData[] {
|
||||
const { values } = series[i];
|
||||
for (let j = 0; j < values.length; j++) {
|
||||
values[j][1] = String(
|
||||
parseFloat(values[j]?.[1]) + parseFloat(series[i + 1].values[j]?.[1]),
|
||||
parseFloat(values[j]?.[1] || '0') +
|
||||
parseFloat(series[i + 1].values[j]?.[1] || '0'),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -88,7 +89,8 @@ function getStackedSeriesQueryFormat(apiResponse: QueryData[]): QueryData[] {
|
||||
const { values } = series[i];
|
||||
for (let j = 0; j < values.length; j++) {
|
||||
values[j].value = String(
|
||||
parseFloat(values[j].value) + parseFloat(series[i + 1].values[j].value),
|
||||
parseFloat(values[j]?.value || '0') +
|
||||
parseFloat(series[i + 1].values[j]?.value || '0'),
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -17,11 +17,7 @@ function getXAxisTimestamps(seriesList: QueryData[]): number[] {
|
||||
return timestampsArr.sort((a, b) => a - b);
|
||||
}
|
||||
|
||||
function fillMissingXAxisTimestamps(
|
||||
timestampArr: number[],
|
||||
data: any[],
|
||||
fillSpans: boolean,
|
||||
): any {
|
||||
function fillMissingXAxisTimestamps(timestampArr: number[], data: any[]): any {
|
||||
// Generate a set of all timestamps in the range
|
||||
const allTimestampsSet = new Set(timestampArr);
|
||||
const processedData = JSON.parse(JSON.stringify(data));
|
||||
@@ -35,14 +31,14 @@ function fillMissingXAxisTimestamps(
|
||||
);
|
||||
|
||||
missingTimestamps.forEach((timestamp) => {
|
||||
const value = fillSpans ? 0 : null;
|
||||
const value = null;
|
||||
|
||||
entry.values.push([timestamp, value]);
|
||||
});
|
||||
|
||||
entry.values.forEach((v) => {
|
||||
if (Number.isNaN(v[1])) {
|
||||
const replaceValue = fillSpans ? 0 : null;
|
||||
const replaceValue = null;
|
||||
// eslint-disable-next-line no-param-reassign
|
||||
v[1] = replaceValue;
|
||||
} else if (v[1] !== null) {
|
||||
@@ -85,11 +81,7 @@ export const getUPlotChartData = (
|
||||
): any[] => {
|
||||
const seriesList = apiResponse?.data?.result || [];
|
||||
const timestampArr = getXAxisTimestamps(seriesList);
|
||||
const yAxisValuesArr = fillMissingXAxisTimestamps(
|
||||
timestampArr,
|
||||
seriesList,
|
||||
fillSpans || false,
|
||||
);
|
||||
const yAxisValuesArr = fillMissingXAxisTimestamps(timestampArr, seriesList);
|
||||
|
||||
return [
|
||||
timestampArr,
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import './LogsExplorer.styles.scss';
|
||||
|
||||
import * as Sentry from '@sentry/react';
|
||||
import ExplorerCard from 'components/ExplorerCard/ExplorerCard';
|
||||
import LogExplorerQuerySection from 'container/LogExplorerQuerySection';
|
||||
import LogsExplorerViews from 'container/LogsExplorerViews';
|
||||
@@ -9,7 +10,6 @@ import Toolbar from 'container/Toolbar/Toolbar';
|
||||
import { useQueryBuilder } from 'hooks/queryBuilder/useQueryBuilder';
|
||||
import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
|
||||
import { useEffect, useMemo, useState } from 'react';
|
||||
import { ErrorBoundary } from 'react-error-boundary';
|
||||
import { DataSource } from 'types/common/queryBuilder';
|
||||
|
||||
import { WrapperStyled } from './styles';
|
||||
@@ -70,7 +70,7 @@ function LogsExplorer(): JSX.Element {
|
||||
);
|
||||
|
||||
return (
|
||||
<ErrorBoundary FallbackComponent={ErrorBoundaryFallback}>
|
||||
<Sentry.ErrorBoundary fallback={<ErrorBoundaryFallback />}>
|
||||
<Toolbar
|
||||
showAutoRefresh={false}
|
||||
leftActions={
|
||||
@@ -101,7 +101,7 @@ function LogsExplorer(): JSX.Element {
|
||||
</div>
|
||||
</div>
|
||||
</WrapperStyled>
|
||||
</ErrorBoundary>
|
||||
</Sentry.ErrorBoundary>
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import './Pipelines.styles.scss';
|
||||
|
||||
import * as Sentry from '@sentry/react';
|
||||
import type { TabsProps } from 'antd';
|
||||
import { Tabs } from 'antd';
|
||||
import getPipeline from 'api/pipeline/get';
|
||||
@@ -9,7 +10,6 @@ import PipelinePage from 'container/PipelinePage/Layouts/Pipeline';
|
||||
import { useNotifications } from 'hooks/useNotifications';
|
||||
import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
|
||||
import { useEffect, useMemo } from 'react';
|
||||
import { ErrorBoundary } from 'react-error-boundary';
|
||||
import { useTranslation } from 'react-i18next';
|
||||
import { useQuery } from 'react-query';
|
||||
import { SuccessResponse } from 'types/api';
|
||||
@@ -82,13 +82,13 @@ function Pipelines(): JSX.Element {
|
||||
}
|
||||
|
||||
return (
|
||||
<ErrorBoundary FallbackComponent={ErrorBoundaryFallback}>
|
||||
<Sentry.ErrorBoundary fallback={<ErrorBoundaryFallback />}>
|
||||
<Tabs
|
||||
className="pipeline-tabs"
|
||||
defaultActiveKey="pipelines"
|
||||
items={tabItems}
|
||||
/>
|
||||
</ErrorBoundary>
|
||||
</Sentry.ErrorBoundary>
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import * as Sentry from '@sentry/react';
|
||||
import { Card } from 'antd';
|
||||
import { NotificationInstance } from 'antd/es/notification/interface';
|
||||
import ROUTES from 'constants/routes';
|
||||
@@ -11,7 +12,6 @@ import getStep from 'lib/getStep';
|
||||
import history from 'lib/history';
|
||||
import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
|
||||
import { MouseEventHandler, useCallback, useEffect, useState } from 'react';
|
||||
import { ErrorBoundary } from 'react-error-boundary';
|
||||
import { connect, useDispatch, useSelector } from 'react-redux';
|
||||
import { bindActionCreators, Dispatch } from 'redux';
|
||||
import { ThunkDispatch } from 'redux-thunk';
|
||||
@@ -146,7 +146,7 @@ function Trace({
|
||||
);
|
||||
|
||||
return (
|
||||
<ErrorBoundary FallbackComponent={ErrorBoundaryFallback}>
|
||||
<Sentry.ErrorBoundary fallback={<ErrorBoundaryFallback />}>
|
||||
<Search />
|
||||
<Container>
|
||||
<div>
|
||||
@@ -169,7 +169,7 @@ function Trace({
|
||||
</Card>
|
||||
</RightContainer>
|
||||
</Container>
|
||||
</ErrorBoundary>
|
||||
</Sentry.ErrorBoundary>
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ import { Button, Card, Checkbox, Input, Tooltip } from 'antd';
|
||||
import { CheckboxChangeEvent } from 'antd/es/checkbox';
|
||||
import { ParaGraph } from 'container/Trace/Filters/Panel/PanelBody/Common/styles';
|
||||
import useDebouncedFn from 'hooks/useDebouncedFunction';
|
||||
import { defaultTo, isEmpty } from 'lodash-es';
|
||||
import { isArray, isEmpty } from 'lodash-es';
|
||||
import {
|
||||
ChangeEvent,
|
||||
Dispatch,
|
||||
@@ -17,6 +17,7 @@ import {
|
||||
import {
|
||||
addFilter,
|
||||
AllTraceFilterKeys,
|
||||
convertToStringArr,
|
||||
FilterType,
|
||||
HandleRunProps,
|
||||
removeFilter,
|
||||
@@ -37,15 +38,14 @@ export function SectionBody(props: SectionBodyProps): JSX.Element {
|
||||
const [searchFilter, setSearchFilter] = useState<string>('');
|
||||
const [searchText, setSearchText] = useState<string>('');
|
||||
const [checkedItems, setCheckedItems] = useState<string[]>(
|
||||
defaultTo(selectedFilters?.[type]?.values as string[], []),
|
||||
convertToStringArr(selectedFilters?.[type]?.values),
|
||||
);
|
||||
|
||||
const [results, setResults] = useState<string[]>([]);
|
||||
const [isFetching, setFetching] = useState<boolean>(false);
|
||||
|
||||
useEffect(
|
||||
() =>
|
||||
setCheckedItems(defaultTo(selectedFilters?.[type]?.values as string[], [])),
|
||||
() => setCheckedItems(convertToStringArr(selectedFilters?.[type]?.values)),
|
||||
[selectedFilters, type],
|
||||
);
|
||||
|
||||
@@ -92,17 +92,21 @@ export function SectionBody(props: SectionBodyProps): JSX.Element {
|
||||
if (checked) {
|
||||
addFilter(type, newValue, setSelectedFilters, keys);
|
||||
setCheckedItems((prev) => {
|
||||
if (!prev.includes(newValue)) {
|
||||
prev.push(newValue);
|
||||
const arr = prev || [];
|
||||
if (isArray(arr) && !arr.includes(newValue)) {
|
||||
arr.push(newValue);
|
||||
}
|
||||
return prev;
|
||||
return convertToStringArr(arr);
|
||||
});
|
||||
} else if (checkedItems.length === 1) {
|
||||
handleRun({ clearByType: type });
|
||||
setCheckedItems([]);
|
||||
} else {
|
||||
removeFilter(type, newValue, setSelectedFilters, keys);
|
||||
setCheckedItems((prev) => prev.filter((item) => item !== newValue));
|
||||
setCheckedItems((prev) => {
|
||||
const prevValue = convertToStringArr(prev);
|
||||
return prevValue.filter((item) => item !== newValue);
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
/* eslint-disable react-hooks/exhaustive-deps */
|
||||
import { getAttributesValues } from 'api/queryBuilder/getAttributesValues';
|
||||
import { isArray } from 'lodash-es';
|
||||
import { Dispatch, SetStateAction, useEffect, useState } from 'react';
|
||||
import {
|
||||
BaseAutocompleteData,
|
||||
@@ -41,6 +40,18 @@ export type FilterType = Record<
|
||||
{ values: string[] | string; keys: BaseAutocompleteData }
|
||||
>;
|
||||
|
||||
export function convertToStringArr(
|
||||
value: string | string[] | undefined,
|
||||
): string[] {
|
||||
if (value) {
|
||||
if (typeof value === 'string') {
|
||||
return [value];
|
||||
}
|
||||
return value;
|
||||
}
|
||||
return [];
|
||||
}
|
||||
|
||||
export const addFilter = (
|
||||
filterType: AllTraceFilterKeys,
|
||||
value: string,
|
||||
@@ -62,28 +73,36 @@ export const addFilter = (
|
||||
'durationNano',
|
||||
].includes(filterType);
|
||||
|
||||
// Convert value to string array
|
||||
const valueArray = convertToStringArr(value);
|
||||
|
||||
// If previous filters are undefined, initialize them
|
||||
if (!prevFilters) {
|
||||
return ({
|
||||
[filterType]: { values: isDuration ? value : [value], keys },
|
||||
[filterType]: { values: isDuration ? value : valueArray, keys },
|
||||
} as unknown) as FilterType;
|
||||
}
|
||||
|
||||
// If the filter type doesn't exist, initialize it
|
||||
if (!prevFilters[filterType]?.values.length) {
|
||||
return {
|
||||
...prevFilters,
|
||||
[filterType]: { values: isDuration ? value : [value], keys },
|
||||
[filterType]: { values: isDuration ? value : valueArray, keys },
|
||||
};
|
||||
}
|
||||
|
||||
// If the value already exists, don't add it again
|
||||
if (prevFilters[filterType].values.includes(value)) {
|
||||
if (convertToStringArr(prevFilters[filterType].values).includes(value)) {
|
||||
return prevFilters;
|
||||
}
|
||||
|
||||
// Otherwise, add the value to the existing array
|
||||
return {
|
||||
...prevFilters,
|
||||
[filterType]: {
|
||||
values: isDuration ? value : [...prevFilters[filterType].values, value],
|
||||
values: isDuration
|
||||
? value
|
||||
: [...convertToStringArr(prevFilters[filterType].values), value],
|
||||
keys,
|
||||
},
|
||||
};
|
||||
@@ -110,10 +129,8 @@ export const removeFilter = (
|
||||
return prevFilters;
|
||||
}
|
||||
|
||||
const prevValue = prevFilters[filterType]?.values;
|
||||
const updatedValues = !isArray(prevValue)
|
||||
? prevValue
|
||||
: prevValue?.filter((item: any) => item !== value);
|
||||
const prevValue = convertToStringArr(prevFilters[filterType]?.values);
|
||||
const updatedValues = prevValue.filter((item: any) => item !== value);
|
||||
|
||||
if (updatedValues.length === 0) {
|
||||
const { [filterType]: item, ...remainingFilters } = prevFilters;
|
||||
|
||||
@@ -1,15 +1,19 @@
|
||||
import './TracesExplorer.styles.scss';
|
||||
|
||||
import { FilterOutlined } from '@ant-design/icons';
|
||||
import * as Sentry from '@sentry/react';
|
||||
import { Button, Card, Tabs, Tooltip } from 'antd';
|
||||
import axios from 'axios';
|
||||
import ExplorerCard from 'components/ExplorerCard/ExplorerCard';
|
||||
import { LOCALSTORAGE } from 'constants/localStorage';
|
||||
import { AVAILABLE_EXPORT_PANEL_TYPES } from 'constants/panelTypes';
|
||||
import { initialQueriesMap, PANEL_TYPES } from 'constants/queryBuilder';
|
||||
import ExplorerOptionWrapper from 'container/ExplorerOptions/ExplorerOptionWrapper';
|
||||
import ExportPanel from 'container/ExportPanel';
|
||||
import { useOptionsMenu } from 'container/OptionsMenu';
|
||||
import RightToolbarActions from 'container/QueryBuilder/components/ToolbarActions/RightToolbarActions';
|
||||
import DateTimeSelector from 'container/TopNav/DateTimeSelectionV2';
|
||||
import { defaultSelectedColumns } from 'container/TracesExplorer/ListView/configs';
|
||||
import QuerySection from 'container/TracesExplorer/QuerySection';
|
||||
import { useUpdateDashboard } from 'hooks/dashboard/useUpdateDashboard';
|
||||
import { addEmptyWidgetInDashboardJSONWithQuery } from 'hooks/dashboard/utils';
|
||||
@@ -19,10 +23,11 @@ import { useShareBuilderUrl } from 'hooks/queryBuilder/useShareBuilderUrl';
|
||||
import { useHandleExplorerTabChange } from 'hooks/useHandleExplorerTabChange';
|
||||
import { useNotifications } from 'hooks/useNotifications';
|
||||
import history from 'lib/history';
|
||||
import { cloneDeep, set } from 'lodash-es';
|
||||
import ErrorBoundaryFallback from 'pages/ErrorBoundaryFallback/ErrorBoundaryFallback';
|
||||
import { useCallback, useEffect, useMemo, useState } from 'react';
|
||||
import { ErrorBoundary } from 'react-error-boundary';
|
||||
import { Dashboard } from 'types/api/dashboard/getAll';
|
||||
import { Query } from 'types/api/queryBuilder/queryBuilderData';
|
||||
import { DataSource } from 'types/common/queryBuilder';
|
||||
import { generateExportToDashboardLink } from 'utils/dashboard/generateExportToDashboardLink';
|
||||
import { v4 } from 'uuid';
|
||||
@@ -42,6 +47,15 @@ function TracesExplorer(): JSX.Element {
|
||||
stagedQuery,
|
||||
} = useQueryBuilder();
|
||||
|
||||
const { options } = useOptionsMenu({
|
||||
storageKey: LOCALSTORAGE.TRACES_LIST_OPTIONS,
|
||||
dataSource: DataSource.TRACES,
|
||||
aggregateOperator: 'noop',
|
||||
initialOptions: {
|
||||
selectColumns: defaultSelectedColumns,
|
||||
},
|
||||
});
|
||||
|
||||
const currentPanelType = useGetPanelTypesQueryParam();
|
||||
|
||||
const { handleExplorerTabChange } = useHandleExplorerTabChange();
|
||||
@@ -101,6 +115,18 @@ function TracesExplorer(): JSX.Element {
|
||||
|
||||
const { mutate: updateDashboard, isLoading } = useUpdateDashboard();
|
||||
|
||||
const getUpdatedQueryForExport = (): Query => {
|
||||
const updatedQuery = cloneDeep(currentQuery);
|
||||
|
||||
set(
|
||||
updatedQuery,
|
||||
'builder.queryData[0].selectColumns',
|
||||
options.selectColumns,
|
||||
);
|
||||
|
||||
return updatedQuery;
|
||||
};
|
||||
|
||||
const handleExport = useCallback(
|
||||
(dashboard: Dashboard | null): void => {
|
||||
if (!dashboard || !panelType) return;
|
||||
@@ -111,11 +137,17 @@ function TracesExplorer(): JSX.Element {
|
||||
|
||||
const widgetId = v4();
|
||||
|
||||
const query =
|
||||
panelType === PANEL_TYPES.LIST
|
||||
? getUpdatedQueryForExport()
|
||||
: exportDefaultQuery;
|
||||
|
||||
const updatedDashboard = addEmptyWidgetInDashboardJSONWithQuery(
|
||||
dashboard,
|
||||
exportDefaultQuery,
|
||||
query,
|
||||
widgetId,
|
||||
panelTypeParam,
|
||||
options.selectColumns,
|
||||
);
|
||||
|
||||
updateDashboard(updatedDashboard, {
|
||||
@@ -144,7 +176,7 @@ function TracesExplorer(): JSX.Element {
|
||||
return;
|
||||
}
|
||||
const dashboardEditView = generateExportToDashboardLink({
|
||||
query: exportDefaultQuery,
|
||||
query,
|
||||
panelType: panelTypeParam,
|
||||
dashboardId: data.payload?.uuid || '',
|
||||
widgetId,
|
||||
@@ -161,6 +193,7 @@ function TracesExplorer(): JSX.Element {
|
||||
},
|
||||
});
|
||||
},
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
[exportDefaultQuery, notifications, panelType, updateDashboard],
|
||||
);
|
||||
|
||||
@@ -185,7 +218,7 @@ function TracesExplorer(): JSX.Element {
|
||||
const [isOpen, setOpen] = useState<boolean>(true);
|
||||
|
||||
return (
|
||||
<ErrorBoundary FallbackComponent={ErrorBoundaryFallback}>
|
||||
<Sentry.ErrorBoundary fallback={<ErrorBoundaryFallback />}>
|
||||
<div className="trace-explorer-page">
|
||||
<Card className="filter" hidden={!isOpen}>
|
||||
<Filter setOpen={setOpen} />
|
||||
@@ -236,7 +269,7 @@ function TracesExplorer(): JSX.Element {
|
||||
/>
|
||||
</Card>
|
||||
</div>
|
||||
</ErrorBoundary>
|
||||
</Sentry.ErrorBoundary>
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
69
frontend/src/periscope/components/Table/Table.styles.scss
Normal file
@@ -0,0 +1,69 @@
|
||||
table,
|
||||
.divTable {
|
||||
border: 1px solid lightgray;
|
||||
width: fit-content;
|
||||
}
|
||||
|
||||
.tr {
|
||||
display: flex;
|
||||
}
|
||||
|
||||
tr,
|
||||
.tr {
|
||||
width: fit-content;
|
||||
height: 30px;
|
||||
}
|
||||
|
||||
th,
|
||||
.th,
|
||||
td,
|
||||
.td {
|
||||
box-shadow: inset 0 0 0 1px lightgray;
|
||||
padding: 0.25rem;
|
||||
}
|
||||
|
||||
th,
|
||||
.th {
|
||||
padding: 2px 4px;
|
||||
position: relative;
|
||||
font-weight: bold;
|
||||
text-align: center;
|
||||
height: 30px;
|
||||
}
|
||||
|
||||
td,
|
||||
.td {
|
||||
height: 30px;
|
||||
}
|
||||
|
||||
.resizer {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
height: 100%;
|
||||
right: 0;
|
||||
width: 5px;
|
||||
background: rgba(0, 0, 0, 0.5);
|
||||
cursor: col-resize;
|
||||
user-select: none;
|
||||
touch-action: none;
|
||||
}
|
||||
|
||||
.resizer.isResizing {
|
||||
background: blue;
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
@media (hover: hover) {
|
||||
.resizer {
|
||||
opacity: 0;
|
||||
}
|
||||
|
||||
*:hover > .resizer {
|
||||
opacity: 1;
|
||||
}
|
||||
}
|
||||
|
||||
.container {
|
||||
border: 1px solid lightgray;
|
||||
margin: 1rem auto;
|
||||
}
|
||||
295
frontend/src/periscope/components/Table/Table.tsx
Normal file
@@ -0,0 +1,295 @@
|
||||
/* eslint-disable react/jsx-props-no-spreading */
|
||||
/* eslint-disable react/no-unstable-nested-components */
|
||||
import './Table.styles.scss';
|
||||
|
||||
// needed for table body level scope DnD setup
|
||||
import {
|
||||
closestCenter,
|
||||
DndContext,
|
||||
DragEndEvent,
|
||||
KeyboardSensor,
|
||||
MouseSensor,
|
||||
TouchSensor,
|
||||
useSensor,
|
||||
useSensors,
|
||||
} from '@dnd-kit/core';
|
||||
import { restrictToHorizontalAxis } from '@dnd-kit/modifiers';
|
||||
import {
|
||||
arrayMove,
|
||||
horizontalListSortingStrategy,
|
||||
SortableContext,
|
||||
useSortable,
|
||||
} from '@dnd-kit/sortable';
|
||||
// needed for row & cell level scope DnD setup
|
||||
import { CSS } from '@dnd-kit/utilities';
|
||||
import {
|
||||
Cell,
|
||||
ColumnDef,
|
||||
flexRender,
|
||||
getCoreRowModel,
|
||||
Header,
|
||||
Table,
|
||||
useReactTable,
|
||||
} from '@tanstack/react-table';
|
||||
import React, { CSSProperties } from 'react';
|
||||
|
||||
import { makeData, Person } from './makeData';
|
||||
|
||||
function DraggableTableHeader({
|
||||
header,
|
||||
}: {
|
||||
header: Header<Person, unknown>;
|
||||
}): JSX.Element {
|
||||
const {
|
||||
attributes,
|
||||
isDragging,
|
||||
listeners,
|
||||
setNodeRef,
|
||||
transform,
|
||||
} = useSortable({
|
||||
id: header.column.id,
|
||||
});
|
||||
|
||||
const style: CSSProperties = {
|
||||
opacity: isDragging ? 0.8 : 1,
|
||||
position: 'relative',
|
||||
transform: CSS.Translate.toString(transform), // translate instead of transform to avoid squishing
|
||||
transition: 'width transform 0.2s ease-in-out',
|
||||
whiteSpace: 'nowrap',
|
||||
width: header.column.getSize(),
|
||||
zIndex: isDragging ? 1 : 0,
|
||||
};
|
||||
|
||||
return (
|
||||
<th colSpan={header.colSpan} ref={setNodeRef} style={style}>
|
||||
{header.isPlaceholder
|
||||
? null
|
||||
: flexRender(header.column.columnDef.header, header.getContext())}
|
||||
<button type="button" {...attributes} {...listeners}>
|
||||
🟰
|
||||
</button>
|
||||
|
||||
<div
|
||||
{...{
|
||||
onDoubleClick: (): void => header.column.resetSize(),
|
||||
onMouseDown: header.getResizeHandler(),
|
||||
onTouchStart: header.getResizeHandler(),
|
||||
className: `resizer ${header.column.getIsResizing() ? 'isResizing' : ''}`,
|
||||
}}
|
||||
/>
|
||||
</th>
|
||||
);
|
||||
}
|
||||
|
||||
function DragAlongCell({ cell }: { cell: Cell<Person, unknown> }): JSX.Element {
|
||||
const { isDragging, setNodeRef, transform } = useSortable({
|
||||
id: cell.column.id,
|
||||
});
|
||||
|
||||
const style: CSSProperties = {
|
||||
opacity: isDragging ? 0.8 : 1,
|
||||
position: 'relative',
|
||||
transform: CSS.Translate.toString(transform), // translate instead of transform to avoid squishing
|
||||
transition: 'width transform 0.2s ease-in-out',
|
||||
width: cell.column.getSize(),
|
||||
zIndex: isDragging ? 1 : 0,
|
||||
};
|
||||
|
||||
return (
|
||||
<td style={style} ref={setNodeRef}>
|
||||
{flexRender(cell.column.columnDef.cell, cell.getContext())}
|
||||
</td>
|
||||
);
|
||||
}
|
||||
|
||||
// un-memoized normal table body component - see memoized version below
|
||||
function TableBody({ table }: { table: Table<Person> }): JSX.Element {
|
||||
const { columnOrder } = table.getState();
|
||||
|
||||
console.log('columnOrder', columnOrder);
|
||||
|
||||
return (
|
||||
<div
|
||||
{...{
|
||||
className: 'tbody',
|
||||
}}
|
||||
>
|
||||
{table.getRowModel().rows.map((row) => (
|
||||
<div key={row.id} className="tr">
|
||||
{row.getVisibleCells().map((cell) => (
|
||||
<SortableContext
|
||||
key={cell.id}
|
||||
items={columnOrder}
|
||||
strategy={horizontalListSortingStrategy}
|
||||
>
|
||||
<DragAlongCell key={cell.id} cell={cell} />
|
||||
</SortableContext>
|
||||
))}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
// special memoized wrapper for our table body that we will use during column resizing
|
||||
export const MemoizedTableBody = React.memo(
|
||||
TableBody,
|
||||
(prev, next) => prev.table.options.data === next.table.options.data,
|
||||
) as typeof TableBody;
|
||||
|
||||
function PeriscopeTable(): JSX.Element {
|
||||
const columns = React.useMemo<ColumnDef<Person>[]>(
|
||||
() => [
|
||||
{
|
||||
accessorKey: 'firstName',
|
||||
cell: (info): any => info.getValue(),
|
||||
id: 'firstName',
|
||||
size: 150,
|
||||
},
|
||||
{
|
||||
accessorFn: (row): any => row.lastName,
|
||||
cell: (info): any => info.getValue(),
|
||||
header: (): JSX.Element => <span>Last Name</span>,
|
||||
id: 'lastName',
|
||||
size: 150,
|
||||
},
|
||||
{
|
||||
accessorKey: 'age',
|
||||
header: (): any => 'Age',
|
||||
id: 'age',
|
||||
size: 120,
|
||||
},
|
||||
{
|
||||
accessorKey: 'visits',
|
||||
header: (): JSX.Element => <span>Visits</span>,
|
||||
id: 'visits',
|
||||
size: 120,
|
||||
},
|
||||
{
|
||||
accessorKey: 'status',
|
||||
header: 'Status',
|
||||
id: 'status',
|
||||
size: 150,
|
||||
},
|
||||
{
|
||||
accessorKey: 'progress',
|
||||
header: 'Profile Progress',
|
||||
id: 'progress',
|
||||
size: 180,
|
||||
},
|
||||
],
|
||||
[],
|
||||
);
|
||||
const [data, setData] = React.useState(() => makeData(20));
|
||||
const [columnOrder, setColumnOrder] = React.useState<string[]>(() =>
|
||||
columns.map((c) => c.id!),
|
||||
);
|
||||
const [columnVisibility, setColumnVisibility] = React.useState({});
|
||||
|
||||
const table = useReactTable({
|
||||
data,
|
||||
columns,
|
||||
getCoreRowModel: getCoreRowModel(),
|
||||
state: {
|
||||
columnOrder,
|
||||
columnVisibility,
|
||||
},
|
||||
defaultColumn: {
|
||||
minSize: 60,
|
||||
maxSize: 800,
|
||||
},
|
||||
columnResizeMode: 'onChange',
|
||||
onColumnOrderChange: setColumnOrder,
|
||||
onColumnVisibilityChange: setColumnVisibility,
|
||||
debugTable: true,
|
||||
debugHeaders: true,
|
||||
debugColumns: true,
|
||||
});
|
||||
|
||||
/**
|
||||
* Instead of calling `column.getSize()` on every render for every header
|
||||
* and especially every data cell (very expensive),
|
||||
* we will calculate all column sizes at once at the root table level in a useMemo
|
||||
* and pass the column sizes down as CSS variables to the <table> element.
|
||||
*/
|
||||
const columnSizeVars = React.useMemo(() => {
|
||||
const headers = table.getFlatHeaders();
|
||||
const colSizes: { [key: string]: number } = {};
|
||||
for (let i = 0; i < headers.length; i++) {
|
||||
const header = headers[i]!;
|
||||
colSizes[`--header-${header.id}-size`] = header.getSize();
|
||||
colSizes[`--col-${header.column.id}-size`] = header.column.getSize();
|
||||
}
|
||||
return colSizes;
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [table.getState().columnSizingInfo, table.getState().columnSizing]);
|
||||
|
||||
// reorder columns after drag & drop
|
||||
function handleDragEnd(event: DragEndEvent): void {
|
||||
const { active, over } = event;
|
||||
|
||||
console.log('active', active, over);
|
||||
if (active && over && active.id !== over.id) {
|
||||
setColumnOrder((columnOrder) => {
|
||||
const oldIndex = columnOrder.indexOf(active.id as string);
|
||||
const newIndex = columnOrder.indexOf(over.id as string);
|
||||
return arrayMove(columnOrder, oldIndex, newIndex); // this is just a splice util
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
const sensors = useSensors(
|
||||
useSensor(MouseSensor, {}),
|
||||
useSensor(TouchSensor, {}),
|
||||
useSensor(KeyboardSensor, {}),
|
||||
);
|
||||
|
||||
return (
|
||||
<DndContext
|
||||
collisionDetection={closestCenter}
|
||||
modifiers={[restrictToHorizontalAxis]}
|
||||
// eslint-disable-next-line react/jsx-no-bind
|
||||
onDragEnd={handleDragEnd}
|
||||
sensors={sensors}
|
||||
>
|
||||
<div className="p-2">
|
||||
<div className="overflow-x-auto">
|
||||
<div
|
||||
className="divTable"
|
||||
style={{
|
||||
...columnSizeVars, // Define column sizes on the <table> element
|
||||
width: table.getTotalSize(),
|
||||
}}
|
||||
>
|
||||
<div className="thead">
|
||||
{table.getHeaderGroups().map((headerGroup) => (
|
||||
<div key={headerGroup.id} className="tr">
|
||||
<SortableContext
|
||||
items={columnOrder}
|
||||
strategy={horizontalListSortingStrategy}
|
||||
>
|
||||
{headerGroup.headers.map((header) => (
|
||||
<div
|
||||
key={header.id}
|
||||
className="th"
|
||||
style={{
|
||||
width: `calc(var(--header-${header?.id}-size) * 1px)`,
|
||||
}}
|
||||
>
|
||||
<DraggableTableHeader key={header.id} header={header} />
|
||||
</div>
|
||||
))}
|
||||
</SortableContext>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
|
||||
<TableBody table={table} />
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</DndContext>
|
||||
);
|
||||
}
|
||||
|
||||
export default PeriscopeTable;
|
||||
46
frontend/src/periscope/components/Table/makeData.ts
Normal file
@@ -0,0 +1,46 @@
|
||||
import { faker } from '@faker-js/faker';
|
||||
|
||||
export type Person = {
|
||||
firstName: string;
|
||||
lastName: string;
|
||||
age: number;
|
||||
visits: number;
|
||||
progress: number;
|
||||
status: 'relationship' | 'complicated' | 'single';
|
||||
subRows?: Person[];
|
||||
};
|
||||
|
||||
const range = (len: number) => {
|
||||
const arr: number[] = [];
|
||||
for (let i = 0; i < len; i++) {
|
||||
arr.push(i);
|
||||
}
|
||||
return arr;
|
||||
};
|
||||
|
||||
const newPerson = (): Person => ({
|
||||
firstName: faker.person.firstName(),
|
||||
lastName: faker.person.lastName(),
|
||||
age: faker.number.int(40),
|
||||
visits: faker.number.int(1000),
|
||||
progress: faker.number.int(100),
|
||||
status: faker.helpers.shuffle<Person['status']>([
|
||||
'relationship',
|
||||
'complicated',
|
||||
'single',
|
||||
])[0]!,
|
||||
});
|
||||
|
||||
export function makeData(...lens: number[]): any {
|
||||
const makeDataLevel = (depth = 0): Person[] => {
|
||||
const len = lens[depth]!;
|
||||
return range(len).map(
|
||||
(d): Person => ({
|
||||
...newPerson(),
|
||||
subRows: lens[depth + 1] ? makeDataLevel(depth + 1) : undefined,
|
||||
}),
|
||||
);
|
||||
};
|
||||
|
||||
return makeDataLevel();
|
||||
}
|
||||
@@ -18,6 +18,7 @@ export type QueryRangePayload = {
|
||||
promQueries?: Record<string, IPromQLQuery>;
|
||||
queryType: EQueryType;
|
||||
panelType: PANEL_TYPES;
|
||||
fillGaps?: boolean;
|
||||
};
|
||||
end: number;
|
||||
start: number;
|
||||
|
||||
@@ -74,6 +74,7 @@ export type IBuilderQuery = {
|
||||
legend: string;
|
||||
pageSize?: number;
|
||||
offset?: number;
|
||||
selectColumns?: BaseAutocompleteData[];
|
||||
};
|
||||
|
||||
export interface IClickHouseQuery {
|
||||
|
||||
@@ -86,6 +86,7 @@ export const routePermission: Record<keyof typeof ROUTES, ROLES[]> = {
|
||||
GET_STARTED_INFRASTRUCTURE_MONITORING: ['ADMIN', 'EDITOR', 'VIEWER'],
|
||||
GET_STARTED_LOGS_MANAGEMENT: ['ADMIN', 'EDITOR', 'VIEWER'],
|
||||
GET_STARTED_AWS_MONITORING: ['ADMIN', 'EDITOR', 'VIEWER'],
|
||||
GET_STARTED_AZURE_MONITORING: ['ADMIN', 'EDITOR', 'VIEWER'],
|
||||
WORKSPACE_LOCKED: ['ADMIN', 'EDITOR', 'VIEWER'],
|
||||
BILLING: ['ADMIN', 'EDITOR', 'VIEWER'],
|
||||
SUPPORT: ['ADMIN', 'EDITOR', 'VIEWER'],
|
||||
|
||||
@@ -2541,7 +2541,7 @@
|
||||
"@dnd-kit/utilities" "^3.2.2"
|
||||
tslib "^2.0.0"
|
||||
|
||||
"@dnd-kit/utilities@^3.2.2":
|
||||
"@dnd-kit/utilities@3.2.2", "@dnd-kit/utilities@^3.2.2":
|
||||
version "3.2.2"
|
||||
resolved "https://registry.yarnpkg.com/@dnd-kit/utilities/-/utilities-3.2.2.tgz#5a32b6af356dc5f74d61b37d6f7129a4040ced7b"
|
||||
integrity sha512-+MKAJEOfaBe5SmV6t34p80MMKhjvUz0vRrvVJbPT0WElzaOJ/1xs+D+KDv+tD/NE5ujfrChEcshd4fLn0wpiqg==
|
||||
@@ -2597,6 +2597,11 @@
|
||||
minimatch "^3.0.4"
|
||||
strip-json-comments "^3.1.1"
|
||||
|
||||
"@faker-js/faker@8.4.1":
|
||||
version "8.4.1"
|
||||
resolved "https://registry.yarnpkg.com/@faker-js/faker/-/faker-8.4.1.tgz#5d5e8aee8fce48f5e189bf730ebd1f758f491451"
|
||||
integrity sha512-XQ3cU+Q8Uqmrbf2e0cIC/QN43sTBSC8KF12u29Mb47tWrt2hAgBXSgpZMj4Ao8Uk0iJcU99QsOCaIL8934obCg==
|
||||
|
||||
"@floating-ui/core@^1.4.2":
|
||||
version "1.5.2"
|
||||
resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.5.2.tgz#53a0f7a98c550e63134d504f26804f6b83dbc071"
|
||||
@@ -3800,6 +3805,18 @@
|
||||
dependencies:
|
||||
"@sinonjs/commons" "^1.7.0"
|
||||
|
||||
"@tanstack/react-table@8.17.3":
|
||||
version "8.17.3"
|
||||
resolved "https://registry.yarnpkg.com/@tanstack/react-table/-/react-table-8.17.3.tgz#4e10b4cf5355a40d6d72a83d3f4b3ecd32f56bf4"
|
||||
integrity sha512-5gwg5SvPD3lNAXPuJJz1fOCEZYk9/GeBFH3w/hCgnfyszOIzwkwgp5I7Q4MJtn0WECp84b5STQUDdmvGi8m3nA==
|
||||
dependencies:
|
||||
"@tanstack/table-core" "8.17.3"
|
||||
|
||||
"@tanstack/table-core@8.17.3":
|
||||
version "8.17.3"
|
||||
resolved "https://registry.yarnpkg.com/@tanstack/table-core/-/table-core-8.17.3.tgz#d7a9830abb29cd369b52b2a7159dc0360af646fd"
|
||||
integrity sha512-mPBodDGVL+fl6d90wUREepHa/7lhsghg2A3vFpakEhrhtbIlgNAZiMr7ccTgak5qbHqF14Fwy+W1yFWQt+WmYQ==
|
||||
|
||||
"@testing-library/dom@^8.5.0":
|
||||
version "8.20.0"
|
||||
resolved "https://registry.npmjs.org/@testing-library/dom/-/dom-8.20.0.tgz"
|
||||
|
||||
176
go.mod
@@ -6,13 +6,13 @@ require (
|
||||
github.com/ClickHouse/clickhouse-go/v2 v2.20.0
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2
|
||||
github.com/SigNoz/govaluate v0.0.0-20240203125216-988004ccc7fd
|
||||
github.com/SigNoz/signoz-otel-collector v0.88.24
|
||||
github.com/SigNoz/signoz-otel-collector v0.102.0
|
||||
github.com/SigNoz/zap_otlp/zap_otlp_encoder v0.0.0-20230822164844-1b861a431974
|
||||
github.com/SigNoz/zap_otlp/zap_otlp_sync v0.0.0-20230822164844-1b861a431974
|
||||
github.com/antonmedv/expr v1.15.3
|
||||
github.com/auth0/go-jwt-middleware v1.0.1
|
||||
github.com/cespare/xxhash v1.1.0
|
||||
github.com/coreos/go-oidc/v3 v3.4.0
|
||||
github.com/coreos/go-oidc/v3 v3.10.0
|
||||
github.com/dustin/go-humanize v1.0.1
|
||||
github.com/go-co-op/gocron v1.30.1
|
||||
github.com/go-kit/log v0.2.1
|
||||
@@ -21,7 +21,7 @@ require (
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/handlers v1.5.1
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/gosimple/slug v1.10.0
|
||||
github.com/jmoiron/sqlx v1.3.4
|
||||
github.com/json-iterator/go v1.1.12
|
||||
@@ -29,18 +29,18 @@ require (
|
||||
github.com/mailru/easyjson v0.7.7
|
||||
github.com/mattn/go-sqlite3 v2.0.3+incompatible
|
||||
github.com/minio/minio-go/v6 v6.0.57
|
||||
github.com/mitchellh/mapstructure v1.5.1-0.20220423185008-bf980b35cac4
|
||||
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c
|
||||
github.com/oklog/oklog v0.3.2
|
||||
github.com/open-telemetry/opamp-go v0.5.0
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.88.0
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor v0.88.0
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.102.0
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/processor/logstransformprocessor v0.102.0
|
||||
github.com/opentracing/opentracing-go v1.2.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/posthog/posthog-go v0.0.0-20220817142604-0b0bbf0f9c0f
|
||||
github.com/prometheus/common v0.44.0
|
||||
github.com/prometheus/common v0.54.0
|
||||
github.com/prometheus/prometheus v2.5.0+incompatible
|
||||
github.com/rs/cors v1.10.1
|
||||
github.com/rs/cors v1.11.0
|
||||
github.com/russellhaering/gosaml2 v0.9.0
|
||||
github.com/russellhaering/goxmldsig v1.2.0
|
||||
github.com/samber/lo v1.38.1
|
||||
@@ -49,93 +49,90 @@ require (
|
||||
github.com/soheilhy/cmux v0.1.5
|
||||
github.com/srikanthccv/ClickHouse-go-mock v0.7.0
|
||||
github.com/stretchr/testify v1.9.0
|
||||
go.opentelemetry.io/collector/component v0.88.0
|
||||
go.opentelemetry.io/collector/confmap v0.88.0
|
||||
go.opentelemetry.io/collector/connector v0.88.0
|
||||
go.opentelemetry.io/collector/consumer v0.88.0
|
||||
go.opentelemetry.io/collector/exporter v0.88.0
|
||||
go.opentelemetry.io/collector/extension v0.88.0
|
||||
go.opentelemetry.io/collector/otelcol v0.88.0
|
||||
go.opentelemetry.io/collector/pdata v1.3.0
|
||||
go.opentelemetry.io/collector/processor v0.88.0
|
||||
go.opentelemetry.io/collector/receiver v0.88.0
|
||||
go.opentelemetry.io/collector/service v0.88.0
|
||||
go.opentelemetry.io/otel v1.24.0
|
||||
go.opentelemetry.io/otel/sdk v1.23.1
|
||||
go.opentelemetry.io/collector/component v0.102.1
|
||||
go.opentelemetry.io/collector/confmap v0.102.1
|
||||
go.opentelemetry.io/collector/confmap/converter/expandconverter v0.102.0
|
||||
go.opentelemetry.io/collector/confmap/provider/fileprovider v0.102.0
|
||||
go.opentelemetry.io/collector/connector v0.102.0
|
||||
go.opentelemetry.io/collector/consumer v0.102.1
|
||||
go.opentelemetry.io/collector/exporter v0.102.0
|
||||
go.opentelemetry.io/collector/extension v0.102.1
|
||||
go.opentelemetry.io/collector/otelcol v0.102.0
|
||||
go.opentelemetry.io/collector/pdata v1.9.0
|
||||
go.opentelemetry.io/collector/processor v0.102.0
|
||||
go.opentelemetry.io/collector/receiver v0.102.0
|
||||
go.opentelemetry.io/collector/service v0.102.0
|
||||
go.opentelemetry.io/otel v1.27.0
|
||||
go.opentelemetry.io/otel/sdk v1.27.0
|
||||
go.uber.org/multierr v1.11.0
|
||||
go.uber.org/zap v1.27.0
|
||||
golang.org/x/crypto v0.24.0
|
||||
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1
|
||||
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842
|
||||
golang.org/x/net v0.26.0
|
||||
golang.org/x/oauth2 v0.16.0
|
||||
golang.org/x/oauth2 v0.21.0
|
||||
golang.org/x/text v0.16.0
|
||||
google.golang.org/grpc v1.62.0
|
||||
google.golang.org/protobuf v1.33.0
|
||||
google.golang.org/grpc v1.64.0
|
||||
google.golang.org/protobuf v1.34.1
|
||||
gopkg.in/segmentio/analytics-go.v3 v3.1.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
k8s.io/apimachinery v0.28.2
|
||||
k8s.io/apimachinery v0.29.3
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.6.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.8.0 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
|
||||
github.com/ClickHouse/ch-go v0.61.3 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
|
||||
github.com/andybalholm/brotli v1.1.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.45.26 // indirect
|
||||
github.com/aws/aws-sdk-go v1.53.16 // indirect
|
||||
github.com/beevik/etree v1.1.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dennwc/varint v1.0.0 // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/edsrzf/mmap-go v1.1.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.3 // indirect
|
||||
github.com/expr-lang/expr v1.16.9 // indirect
|
||||
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect
|
||||
github.com/go-faster/city v1.0.1 // indirect
|
||||
github.com/go-faster/errors v0.7.1 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.1 // indirect
|
||||
github.com/go-logfmt/logfmt v0.6.0 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/gopherjs/gopherjs v1.17.2 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/gosimple/unidecode v1.0.0 // indirect
|
||||
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
|
||||
github.com/hashicorp/go-version v1.7.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/influxdata/go-syslog/v3 v3.0.1-0.20210608084020-ac565dc76ba6 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jonboulle/clockwork v0.2.2 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/jpillora/backoff v1.0.0 // indirect
|
||||
github.com/jtolds/gls v4.20.0+incompatible // indirect
|
||||
github.com/klauspost/compress v1.17.7 // indirect
|
||||
github.com/klauspost/compress v1.17.8 // indirect
|
||||
github.com/klauspost/cpuid v1.2.3 // indirect
|
||||
github.com/knadh/koanf/v2 v2.0.1 // indirect
|
||||
github.com/knadh/koanf/v2 v2.1.1 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/leodido/ragel-machinery v0.0.0-20181214104525-299bdde78165 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/leodido/go-syslog/v4 v4.1.0 // indirect
|
||||
github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
|
||||
github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/minio/md5-simd v1.1.0 // indirect
|
||||
github.com/minio/sha256-simd v0.1.1 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
@@ -145,68 +142,67 @@ require (
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
|
||||
github.com/oklog/run v1.1.0 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/onsi/gomega v1.19.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.88.0 // indirect
|
||||
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.102.0 // indirect
|
||||
github.com/paulmach/orb v0.11.1 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.21 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_golang v1.17.0 // indirect
|
||||
github.com/prometheus/client_model v0.5.0 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
|
||||
github.com/prometheus/client_golang v1.19.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common/sigv4 v0.1.0 // indirect
|
||||
github.com/prometheus/procfs v0.11.1 // indirect
|
||||
github.com/prometheus/statsd_exporter v0.22.7 // indirect
|
||||
github.com/prometheus/procfs v0.15.0 // indirect
|
||||
github.com/robfig/cron/v3 v3.0.1 // indirect
|
||||
github.com/segmentio/asm v1.2.0 // indirect
|
||||
github.com/segmentio/backo-go v1.0.1 // indirect
|
||||
github.com/shirou/gopsutil/v3 v3.23.12 // indirect
|
||||
github.com/shirou/gopsutil/v3 v3.24.4 // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/shopspring/decimal v1.3.1 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/smarty/assertions v1.15.0 // indirect
|
||||
github.com/spf13/cobra v1.7.0 // indirect
|
||||
github.com/spf13/cobra v1.8.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/valyala/fastjson v1.6.4 // indirect
|
||||
github.com/vjeantet/grok v1.0.1 // indirect
|
||||
github.com/xtgo/uuid v0.0.0-20140804021211-a0b114877d4c // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.3 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/collector v0.88.0 // indirect
|
||||
go.opentelemetry.io/collector/config/configtelemetry v0.88.0 // indirect
|
||||
go.opentelemetry.io/collector/featuregate v1.0.0-rcv0017 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.88.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.20.0 // indirect
|
||||
go.opentelemetry.io/otel/bridge/opencensus v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.42.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.24.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.19.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.24.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
|
||||
go.opentelemetry.io/collector v0.102.1 // indirect
|
||||
go.opentelemetry.io/collector/config/configtelemetry v0.102.1 // indirect
|
||||
go.opentelemetry.io/collector/confmap/provider/envprovider v0.102.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap/provider/httpprovider v0.102.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap/provider/httpsprovider v0.102.0 // indirect
|
||||
go.opentelemetry.io/collector/confmap/provider/yamlprovider v0.102.0 // indirect
|
||||
go.opentelemetry.io/collector/featuregate v1.9.0 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.102.0 // indirect
|
||||
go.opentelemetry.io/contrib/config v0.7.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/bridge/opencensus v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.49.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.27.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.27.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.2.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/goleak v1.3.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
gonum.org/v1/gonum v0.14.0 // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
gonum.org/v1/gonum v0.15.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
k8s.io/klog/v2 v2.110.1 // indirect
|
||||
k8s.io/utils v0.0.0-20230711102312-30195339c3c7 // indirect
|
||||
k8s.io/client-go v0.29.3 // indirect
|
||||
k8s.io/klog/v2 v2.120.1 // indirect
|
||||
k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
|
||||
)
|
||||
|
||||
replace github.com/prometheus/prometheus => github.com/SigNoz/prometheus v1.11.0
|
||||
replace github.com/prometheus/prometheus => github.com/SigNoz/prometheus v1.11.1
|
||||
|
||||