mirror of
https://github.com/koush/scrypted.git
synced 2026-02-05 23:22:13 +00:00
Compare commits
87 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f9f597ef01 | ||
|
|
2e07788c0c | ||
|
|
9c0fbc1cb6 | ||
|
|
239d49899d | ||
|
|
2d3589b5a3 | ||
|
|
96ec465a38 | ||
|
|
5bb6b87c7d | ||
|
|
fcfedccaf8 | ||
|
|
98373833fd | ||
|
|
03588be125 | ||
|
|
cdd81daec5 | ||
|
|
d64f90c0c8 | ||
|
|
ec31dee36e | ||
|
|
11f2e88590 | ||
|
|
bf51ddb2d5 | ||
|
|
26000f1828 | ||
|
|
f65485af97 | ||
|
|
72c5690d05 | ||
|
|
e076d61122 | ||
|
|
7071808514 | ||
|
|
1e2fd46cd3 | ||
|
|
e3cdd4326f | ||
|
|
227f932ad8 | ||
|
|
67cec188ce | ||
|
|
1ee276185e | ||
|
|
42ed855b05 | ||
|
|
93da4eed30 | ||
|
|
a72a596578 | ||
|
|
72663dd68c | ||
|
|
108d57dbdd | ||
|
|
bc71fd8515 | ||
|
|
a51070767b | ||
|
|
269cc4dbc9 | ||
|
|
684961fa4b | ||
|
|
4f60b7e379 | ||
|
|
5d72061151 | ||
|
|
f2c940c1d3 | ||
|
|
7e817b0b30 | ||
|
|
75bb15d3b7 | ||
|
|
ba1a1eff67 | ||
|
|
5432b5b917 | ||
|
|
f677cf7393 | ||
|
|
bdf9278131 | ||
|
|
0ae93a9c3f | ||
|
|
72422cdd8b | ||
|
|
390d1b3329 | ||
|
|
024e99766a | ||
|
|
0160502da8 | ||
|
|
f0d65982de | ||
|
|
1445933bd4 | ||
|
|
508f31c254 | ||
|
|
fd1aa10a2a | ||
|
|
fceed68d75 | ||
|
|
955e780c64 | ||
|
|
452fe20e8f | ||
|
|
9083e16cdb | ||
|
|
840a278e5d | ||
|
|
6d036dbd60 | ||
|
|
d5ba6f34d6 | ||
|
|
0321846c22 | ||
|
|
714747fcee | ||
|
|
e3906da3c4 | ||
|
|
820ef70033 | ||
|
|
0c95f5c052 | ||
|
|
4cfd7c4362 | ||
|
|
1e8126dec8 | ||
|
|
d3fbc58736 | ||
|
|
46113744b3 | ||
|
|
3947624ae0 | ||
|
|
4ac5ded012 | ||
|
|
aadfacf50a | ||
|
|
bb1e0ac82b | ||
|
|
23a15a1533 | ||
|
|
01dd480c01 | ||
|
|
364cae3273 | ||
|
|
8a986ab707 | ||
|
|
ca96959de8 | ||
|
|
2f0ae9ef50 | ||
|
|
8b84bac2c2 | ||
|
|
976ed7f1a5 | ||
|
|
b4e6821da8 | ||
|
|
540b990a08 | ||
|
|
ce75b072da | ||
|
|
5bca9b7156 | ||
|
|
ae4914346b | ||
|
|
b593209558 | ||
|
|
9df399708f |
64
.github/workflows/test.yml
vendored
64
.github/workflows/test.yml
vendored
@@ -9,52 +9,28 @@ on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
test_linux_local:
|
||||
name: Test Linux local installation
|
||||
runs-on: ubuntu-latest
|
||||
test_local:
|
||||
name: Test local installation on ${{ matrix.runner }}
|
||||
runs-on: ${{ matrix.runner }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
runner: [ubuntu-latest, macos-14, macos-13, windows-latest]
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Run install script
|
||||
|
||||
- name: Parse latest server release
|
||||
id: parse_server
|
||||
shell: bash
|
||||
run: |
|
||||
cat ./install/local/install-scrypted-dependencies-linux.sh | sudo SERVICE_USER=$USER bash
|
||||
|
||||
- name: Test server is running
|
||||
run: |
|
||||
systemctl status scrypted.service
|
||||
curl -k --retry 20 --retry-all-errors --retry-max-time 600 https://localhost:10443/
|
||||
|
||||
test_mac_local:
|
||||
name: Test Mac local installation
|
||||
runs-on: macos-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Run install script
|
||||
run: |
|
||||
mkdir -p ~/.scrypted
|
||||
bash ./install/local/install-scrypted-dependencies-mac.sh
|
||||
|
||||
- name: Test server is running
|
||||
run: |
|
||||
curl -k --retry 20 --retry-all-errors --retry-max-time 600 https://localhost:10443/
|
||||
|
||||
test_windows_local:
|
||||
name: Test Windows local installation
|
||||
runs-on: windows-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Run install script
|
||||
run: |
|
||||
.\install\local\install-scrypted-dependencies-win.ps1
|
||||
|
||||
- name: Test server is running
|
||||
run: |
|
||||
curl -k --retry 20 --retry-all-errors --retry-max-time 600 https://localhost:10443/
|
||||
VERSION=$(cat ./server/package-lock.json | jq -r '.version')
|
||||
echo "version=$VERSION" >> "$GITHUB_OUTPUT"
|
||||
echo "Will test @scrypted/server@$VERSION"
|
||||
|
||||
- name: Install scrypted server
|
||||
uses: scryptedapp/setup-scrypted@v0.0.2
|
||||
with:
|
||||
branch: ${{ github.sha }}
|
||||
version: ${{ steps.parse_server.outputs.version }}
|
||||
103
common/package-lock.json
generated
103
common/package-lock.json
generated
@@ -74,7 +74,7 @@
|
||||
},
|
||||
"../sdk": {
|
||||
"name": "@scrypted/sdk",
|
||||
"version": "0.3.4",
|
||||
"version": "0.3.29",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@babel/preset-typescript": "^7.18.6",
|
||||
@@ -111,64 +111,57 @@
|
||||
},
|
||||
"../server": {
|
||||
"name": "@scrypted/server",
|
||||
"version": "0.82.0",
|
||||
"version": "0.106.0",
|
||||
"hasInstallScript": true,
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@mapbox/node-pre-gyp": "^1.0.11",
|
||||
"@scrypted/types": "^0.3.4",
|
||||
"adm-zip": "^0.5.10",
|
||||
"@scrypted/ffmpeg-static": "^6.1.0-build1",
|
||||
"@scrypted/node-pty": "^1.0.10",
|
||||
"@scrypted/types": "^0.3.28",
|
||||
"adm-zip": "^0.5.12",
|
||||
"body-parser": "^1.20.2",
|
||||
"cookie-parser": "^1.4.6",
|
||||
"debug": "^4.3.4",
|
||||
"dotenv": "^16.4.5",
|
||||
"engine.io": "^6.5.4",
|
||||
"express": "^4.18.2",
|
||||
"ffmpeg-static": "^5.2.0",
|
||||
"follow-redirects": "^1.15.4",
|
||||
"express": "^4.19.2",
|
||||
"follow-redirects": "^1.15.6",
|
||||
"http-auth": "^4.2.0",
|
||||
"ip": "^1.1.8",
|
||||
"level": "^8.0.0",
|
||||
"linkfs": "^2.1.0",
|
||||
"ip": "^2.0.1",
|
||||
"level": "^8.0.1",
|
||||
"lodash": "^4.17.21",
|
||||
"memfs": "^4.6.0",
|
||||
"mime": "^3.0.0",
|
||||
"nan": "^2.18.0",
|
||||
"nan": "^2.19.0",
|
||||
"node-dijkstra": "^2.5.0",
|
||||
"node-forge": "^1.3.1",
|
||||
"node-gyp": "^10.0.1",
|
||||
"node-gyp": "^10.1.0",
|
||||
"py": "npm:@bjia56/portable-python@^0.1.31",
|
||||
"router": "^1.3.8",
|
||||
"semver": "^7.5.4",
|
||||
"sharp": "^0.33.1",
|
||||
"semver": "^7.6.2",
|
||||
"sharp": "^0.33.3",
|
||||
"source-map-support": "^0.5.21",
|
||||
"tar": "^6.2.0",
|
||||
"tar": "^7.1.0",
|
||||
"tslib": "^2.6.2",
|
||||
"typescript": "^5.3.3",
|
||||
"typescript": "^5.4.5",
|
||||
"whatwg-mimetype": "^4.0.0",
|
||||
"ws": "^8.16.0"
|
||||
"ws": "^8.17.0"
|
||||
},
|
||||
"bin": {
|
||||
"scrypted-serve": "bin/scrypted-serve"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/adm-zip": "^0.5.5",
|
||||
"@types/cookie-parser": "^1.4.6",
|
||||
"@types/debug": "^4.1.12",
|
||||
"@types/cookie-parser": "^1.4.7",
|
||||
"@types/express": "^4.17.21",
|
||||
"@types/follow-redirects": "^1.14.4",
|
||||
"@types/http-auth": "^4.1.4",
|
||||
"@types/ip": "^1.1.3",
|
||||
"@types/lodash": "^4.14.202",
|
||||
"@types/mime": "^3.0.4",
|
||||
"@types/lodash": "^4.17.1",
|
||||
"@types/node-dijkstra": "^2.5.6",
|
||||
"@types/node-forge": "^1.3.10",
|
||||
"@types/pem": "^1.14.4",
|
||||
"@types/semver": "^7.5.6",
|
||||
"@types/node-forge": "^1.3.11",
|
||||
"@types/semver": "^7.5.8",
|
||||
"@types/source-map-support": "^0.5.10",
|
||||
"@types/tar": "^6.1.10",
|
||||
"@types/whatwg-mimetype": "^3.0.2",
|
||||
"@types/ws": "^8.5.10"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"node-pty-prebuilt-multiarch": "^0.10.1-pre.5"
|
||||
}
|
||||
},
|
||||
"node_modules/@cspotcode/source-map-support": {
|
||||
@@ -453,53 +446,47 @@
|
||||
"version": "file:../server",
|
||||
"requires": {
|
||||
"@mapbox/node-pre-gyp": "^1.0.11",
|
||||
"@scrypted/types": "^0.3.4",
|
||||
"@scrypted/ffmpeg-static": "^6.1.0-build1",
|
||||
"@scrypted/node-pty": "^1.0.10",
|
||||
"@scrypted/types": "^0.3.28",
|
||||
"@types/adm-zip": "^0.5.5",
|
||||
"@types/cookie-parser": "^1.4.6",
|
||||
"@types/debug": "^4.1.12",
|
||||
"@types/cookie-parser": "^1.4.7",
|
||||
"@types/express": "^4.17.21",
|
||||
"@types/follow-redirects": "^1.14.4",
|
||||
"@types/http-auth": "^4.1.4",
|
||||
"@types/ip": "^1.1.3",
|
||||
"@types/lodash": "^4.14.202",
|
||||
"@types/mime": "^3.0.4",
|
||||
"@types/lodash": "^4.17.1",
|
||||
"@types/node-dijkstra": "^2.5.6",
|
||||
"@types/node-forge": "^1.3.10",
|
||||
"@types/pem": "^1.14.4",
|
||||
"@types/semver": "^7.5.6",
|
||||
"@types/node-forge": "^1.3.11",
|
||||
"@types/semver": "^7.5.8",
|
||||
"@types/source-map-support": "^0.5.10",
|
||||
"@types/tar": "^6.1.10",
|
||||
"@types/whatwg-mimetype": "^3.0.2",
|
||||
"@types/ws": "^8.5.10",
|
||||
"adm-zip": "^0.5.10",
|
||||
"adm-zip": "^0.5.12",
|
||||
"body-parser": "^1.20.2",
|
||||
"cookie-parser": "^1.4.6",
|
||||
"debug": "^4.3.4",
|
||||
"dotenv": "^16.4.5",
|
||||
"engine.io": "^6.5.4",
|
||||
"express": "^4.18.2",
|
||||
"ffmpeg-static": "^5.2.0",
|
||||
"follow-redirects": "^1.15.4",
|
||||
"express": "^4.19.2",
|
||||
"follow-redirects": "^1.15.6",
|
||||
"http-auth": "^4.2.0",
|
||||
"ip": "^1.1.8",
|
||||
"level": "^8.0.0",
|
||||
"linkfs": "^2.1.0",
|
||||
"ip": "^2.0.1",
|
||||
"level": "^8.0.1",
|
||||
"lodash": "^4.17.21",
|
||||
"memfs": "^4.6.0",
|
||||
"mime": "^3.0.0",
|
||||
"nan": "^2.18.0",
|
||||
"nan": "^2.19.0",
|
||||
"node-dijkstra": "^2.5.0",
|
||||
"node-forge": "^1.3.1",
|
||||
"node-gyp": "^10.0.1",
|
||||
"node-pty-prebuilt-multiarch": "^0.10.1-pre.5",
|
||||
"node-gyp": "^10.1.0",
|
||||
"py": "npm:@bjia56/portable-python@^0.1.31",
|
||||
"router": "^1.3.8",
|
||||
"semver": "^7.5.4",
|
||||
"sharp": "^0.33.1",
|
||||
"semver": "^7.6.2",
|
||||
"sharp": "^0.33.3",
|
||||
"source-map-support": "^0.5.21",
|
||||
"tar": "^6.2.0",
|
||||
"tar": "^7.1.0",
|
||||
"tslib": "^2.6.2",
|
||||
"typescript": "^5.3.3",
|
||||
"typescript": "^5.4.5",
|
||||
"whatwg-mimetype": "^4.0.0",
|
||||
"ws": "^8.16.0"
|
||||
"ws": "^8.17.0"
|
||||
}
|
||||
},
|
||||
"@tsconfig/node10": {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Home Assistant Addon Configuration
|
||||
name: Scrypted
|
||||
version: "20-jammy-full.s6-v0.99.0"
|
||||
version: "v0.105.0-jammy-full"
|
||||
slug: scrypted
|
||||
description: Scrypted is a high performance home video integration and automation platform
|
||||
url: "https://github.com/koush/scrypted"
|
||||
|
||||
@@ -35,7 +35,7 @@ services:
|
||||
# Avahi can be used for network discovery by passing in the host daemon
|
||||
# or running the daemon inside the container. Choose one or the other.
|
||||
# Uncomment next line to run avahi-daemon inside the container.
|
||||
# See volumes section below to use the host daemon.
|
||||
# See volumes and security_opt section below to use the host daemon.
|
||||
# - SCRYPTED_DOCKER_AVAHI=true
|
||||
|
||||
# NVIDIA (Part 1 of 4)
|
||||
@@ -71,11 +71,16 @@ services:
|
||||
# Ensure Avahi is running on the host machine:
|
||||
# It can be installed with: sudo apt-get install avahi-daemon
|
||||
# This is not compatible with running avahi inside the container (see above).
|
||||
# Also, uncomment the lines under security_opt
|
||||
# - /var/run/dbus:/var/run/dbus
|
||||
# - /var/run/avahi-daemon/socket:/var/run/avahi-daemon/socket
|
||||
|
||||
# Default volume for the Scrypted database. Typically should not be changed.
|
||||
- ~/.scrypted/volume:/server/volume
|
||||
# Uncomment the following lines to use Avahi daemon from the host
|
||||
# Without this, AppArmor will block the container's attempt to talk to Avahi via dbus
|
||||
# security_opt:
|
||||
# - apparmor:unconfined
|
||||
devices: [
|
||||
# uncomment the common systems devices to pass
|
||||
# them through to docker.
|
||||
|
||||
@@ -1,13 +1,35 @@
|
||||
if [ "$(uname -m)" = "x86_64" ]
|
||||
then
|
||||
echo "Installing Intel graphics packages."
|
||||
apt-get update && apt-get install -y gpg-agent &&
|
||||
rm -f /usr/share/keyrings/intel-graphics.gpg &&
|
||||
curl -L https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor --yes --output /usr/share/keyrings/intel-graphics.gpg &&
|
||||
echo 'deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc' | tee /etc/apt/sources.list.d/intel.gpu.jammy.list &&
|
||||
apt-get -y update &&
|
||||
apt-get -y install intel-opencl-icd intel-media-va-driver-non-free &&
|
||||
# this script previvously apt install intel-media-va-driver-non-free, but that seems to no longer be necessary.
|
||||
|
||||
# the intel provided script is disabled since it does not work with the 6.8 kernel in Ubuntu 24.04 or Proxmox 8.2.
|
||||
# manual installation of the Intel graphics stuff is required.
|
||||
|
||||
# echo "Installing Intel graphics packages."
|
||||
# apt-get update && apt-get install -y gpg-agent &&
|
||||
# rm -f /usr/share/keyrings/intel-graphics.gpg &&
|
||||
# curl -L https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor --yes --output /usr/share/keyrings/intel-graphics.gpg &&
|
||||
# echo 'deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc' | tee /etc/apt/sources.list.d/intel.gpu.jammy.list &&
|
||||
# apt-get -y update &&
|
||||
# apt-get -y install intel-opencl-icd &&
|
||||
# apt-get -y dist-upgrade;
|
||||
|
||||
# manual installation
|
||||
# https://github.com/intel/compute-runtime/releases/tag/24.13.29138.7
|
||||
|
||||
rm -rf /tmp/neo && mkdir -p /tmp/neo && cd /tmp/neo &&
|
||||
apt-get install -y ocl-icd-libopencl1 &&
|
||||
curl -O -L https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.16510.2/intel-igc-core_1.0.16510.2_amd64.deb &&
|
||||
curl -O -L https://github.com/intel/intel-graphics-compiler/releases/download/igc-1.0.16510.2/intel-igc-opencl_1.0.16510.2_amd64.deb &&
|
||||
curl -O -L https://github.com/intel/compute-runtime/releases/download/24.13.29138.7/intel-level-zero-gpu-dbgsym_1.3.29138.7_amd64.ddeb &&
|
||||
curl -O -L https://github.com/intel/compute-runtime/releases/download/24.13.29138.7/intel-level-zero-gpu_1.3.29138.7_amd64.deb &&
|
||||
curl -O -L https://github.com/intel/compute-runtime/releases/download/24.13.29138.7/intel-opencl-icd-dbgsym_24.13.29138.7_amd64.ddeb &&
|
||||
curl -O -L https://github.com/intel/compute-runtime/releases/download/24.13.29138.7/intel-opencl-icd_24.13.29138.7_amd64.deb &&
|
||||
curl -O -L https://github.com/intel/compute-runtime/releases/download/24.13.29138.7/libigdgmm12_22.3.18_amd64.deb &&
|
||||
dpkg -i *.deb &&
|
||||
cd /tmp && rm -rf /tmp/neo &&
|
||||
apt-get -y dist-upgrade;
|
||||
|
||||
exit $?
|
||||
else
|
||||
echo "Intel graphics will not be installed on this architecture."
|
||||
|
||||
@@ -61,6 +61,8 @@ then
|
||||
sudo apt-get -y install avahi-daemon
|
||||
sed -i 's/'#' - \/var\/run\/dbus/- \/var\/run\/dbus/g' $DOCKER_COMPOSE_YML
|
||||
sed -i 's/'#' - \/var\/run\/avahi-daemon/- \/var\/run\/avahi-daemon/g' $DOCKER_COMPOSE_YML
|
||||
sed -i 's/'#' security_opt:/security_opt:/g' $DOCKER_COMPOSE_YML
|
||||
sed -i 's/'#' - apparmor:unconfined/ - apparmor:unconfined/g' $DOCKER_COMPOSE_YML
|
||||
fi
|
||||
|
||||
echo "Setting permissions on $SCRYPTED_HOME"
|
||||
|
||||
@@ -72,6 +72,7 @@ function removescryptedfstab() {
|
||||
grep -v "scrypted-nvr" /etc/fstab > /tmp/fstab && cp /tmp/fstab /etc/fstab
|
||||
# ensure newline
|
||||
sed -i -e '$a\' /etc/fstab
|
||||
systemctl daemon-reload
|
||||
}
|
||||
|
||||
BLOCK_DEVICE="/dev/$1"
|
||||
@@ -95,7 +96,17 @@ then
|
||||
set +e
|
||||
|
||||
sync
|
||||
mkfs -F -t ext4 "$BLOCK_DEVICE"1
|
||||
PARTITION_DEVICE="$BLOCK_DEVICE"1
|
||||
if [ ! -e "$PARTITION_DEVICE" ]
|
||||
then
|
||||
PARTITION_DEVICE="$BLOCK_DEVICE"p1
|
||||
if [ ! -e "$PARTITION_DEVICE" ]
|
||||
then
|
||||
echo "Unable to determine block device partition from block device: $BLOCK_DEVICE"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
mkfs -F -t ext4 "$PARTITION_DEVICE"
|
||||
sync
|
||||
|
||||
# parse/evaluate blkid line as env vars
|
||||
@@ -119,6 +130,7 @@ then
|
||||
mkdir -p /mnt/scrypted-nvr
|
||||
echo "PARTLABEL=scrypted-nvr /mnt/scrypted-nvr ext4 defaults,nofail 0 0" >> /etc/fstab
|
||||
mount -a
|
||||
systemctl daemon-reload
|
||||
set +e
|
||||
|
||||
DIR="/mnt/scrypted-nvr"
|
||||
|
||||
@@ -97,7 +97,7 @@ echo "docker compose rm -rf"
|
||||
sudo -u $SERVICE_USER docker rm -f /scrypted /scrypted-watchtower 2> /dev/null
|
||||
|
||||
echo "Installing Scrypted..."
|
||||
RUN sudo -u $SERVICE_USER npx -y scrypted@latest install-server
|
||||
RUN sudo -u $SERVICE_USER npx -y scrypted@latest install-server $SCRYPTED_INSTALL_VERSION
|
||||
|
||||
cat > /etc/systemd/system/scrypted.service <<EOT
|
||||
|
||||
|
||||
@@ -121,7 +121,7 @@ then
|
||||
fi
|
||||
|
||||
echo "Installing Scrypted..."
|
||||
RUN $NPX_PATH -y scrypted@latest install-server
|
||||
RUN $NPX_PATH -y scrypted@latest install-server $SCRYPTED_INSTALL_VERSION
|
||||
|
||||
cat > ~/Library/LaunchAgents/app.scrypted.server.plist <<EOT
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
@@ -11,7 +11,7 @@ iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/in
|
||||
choco upgrade -y nodejs-lts --version=20.11.1
|
||||
|
||||
# Install VC Redist, which is necessary for portable python
|
||||
choco install vcredist140
|
||||
choco install -y vcredist140
|
||||
|
||||
# TODO: remove python install, and use portable python
|
||||
# Install Python
|
||||
@@ -26,7 +26,12 @@ $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";"
|
||||
py $SCRYPTED_WINDOWS_PYTHON_VERSION -m pip install --upgrade pip
|
||||
py $SCRYPTED_WINDOWS_PYTHON_VERSION -m pip install debugpy typing_extensions typing opencv-python
|
||||
|
||||
npx -y scrypted@latest install-server
|
||||
$SCRYPTED_INSTALL_VERSION=[System.Environment]::GetEnvironmentVariable("SCRYPTED_INSTALL_VERSION","User")
|
||||
if ($SCRYPTED_INSTALL_VERSION -eq $null) {
|
||||
npx -y scrypted@latest install-server
|
||||
} else {
|
||||
npx -y scrypted@latest install-server $SCRYPTED_INSTALL_VERSION
|
||||
}
|
||||
|
||||
$USER_HOME_ESCAPED = $env:USERPROFILE.replace('\', '\\')
|
||||
$SCRYPTED_HOME = $env:USERPROFILE + '\.scrypted'
|
||||
|
||||
@@ -1,10 +1,4 @@
|
||||
#!/bin/bash
|
||||
echo 'if (!process.version.startsWith("v18")) throw new Error("Node 18 is required. Install Node Version Manager (nvm) for versioned node installations. See https://github.com/koush/scrypted/pull/498#issuecomment-1373854020")' | node
|
||||
if [ "$?" != 0 ]
|
||||
then
|
||||
exit
|
||||
fi
|
||||
|
||||
echo ######################################
|
||||
echo "Setting up popular plugins."
|
||||
echo "Additional will need npm install manually."
|
||||
@@ -15,7 +9,7 @@ cd $(dirname $0)
|
||||
git submodule init
|
||||
git submodule update
|
||||
|
||||
for directory in sdk common server packages/client packages/auth-fetch
|
||||
for directory in sdk server common packages/client packages/auth-fetch
|
||||
do
|
||||
echo "$directory > npm install"
|
||||
pushd $directory
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import { HttpFetchOptions, HttpFetchResponseType, checkStatus, fetcher, getFetchMethod, setDefaultHttpFetchAccept } from '../../../server/src/fetch';
|
||||
import { HttpFetchOptions, HttpFetchResponseType, checkStatus, createHeadersArray, fetcher, getFetchMethod, hasHeader, setDefaultHttpFetchAccept, setHeader } from '../../../server/src/fetch';
|
||||
|
||||
export interface AuthFetchCredentialState {
|
||||
username: string;
|
||||
@@ -74,15 +74,15 @@ export function createAuthFetch<B, M>(
|
||||
) {
|
||||
const authHttpFetch = async <T extends HttpFetchOptions<B>>(options: T & AuthFetchOptions): ReturnType<typeof h<T>> => {
|
||||
const method = getFetchMethod(options);
|
||||
const headers = new Headers(options.headers);
|
||||
const headers = createHeadersArray(options.headers);
|
||||
options.headers = headers;
|
||||
setDefaultHttpFetchAccept(headers, options.responseType);
|
||||
|
||||
const initialHeader = await getAuth(options, options.url, method);
|
||||
// try to provide an authorization if a session exists, but don't override Authorization if provided already.
|
||||
// 401 will trigger a proper auth.
|
||||
if (initialHeader && !headers.has('Authorization'))
|
||||
headers.set('Authorization', initialHeader);
|
||||
if (initialHeader && !hasHeader(headers, 'Authorization'))
|
||||
setHeader(headers, 'Authorization', initialHeader);
|
||||
|
||||
const initialResponse = await h({
|
||||
...options,
|
||||
@@ -126,7 +126,7 @@ export function createAuthFetch<B, M>(
|
||||
|
||||
const header = await getAuth(options, options.url, method);
|
||||
if (header)
|
||||
headers.set('Authorization', header);
|
||||
setHeader(headers, 'Authorization', header);
|
||||
|
||||
return h(options);
|
||||
}
|
||||
|
||||
4
packages/cli/package-lock.json
generated
4
packages/cli/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "scrypted",
|
||||
"version": "1.3.15",
|
||||
"version": "1.3.16",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "scrypted",
|
||||
"version": "1.3.15",
|
||||
"version": "1.3.16",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@scrypted/client": "^1.3.3",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "scrypted",
|
||||
"version": "1.3.15",
|
||||
"version": "1.3.16",
|
||||
"description": "",
|
||||
"main": "./dist/packages/cli/src/main.js",
|
||||
"bin": {
|
||||
|
||||
@@ -14,8 +14,12 @@ const EXIT_FILE = '.exit';
|
||||
const UPDATE_FILE = '.update';
|
||||
|
||||
async function runCommand(command: string, ...args: string[]) {
|
||||
if (os.platform() === 'win32')
|
||||
if (os.platform() === 'win32') {
|
||||
command += '.cmd';
|
||||
// wrap each argument in a quote to handle spaces in paths
|
||||
// https://github.com/nodejs/node/issues/38490#issuecomment-927330248
|
||||
args = args.map(arg => '"' + arg + '"');
|
||||
}
|
||||
console.log('running', command, ...args);
|
||||
const cp = child_process.spawn(command, args, {
|
||||
stdio: 'inherit',
|
||||
|
||||
4
plugins/amcrest/package-lock.json
generated
4
plugins/amcrest/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@scrypted/amcrest",
|
||||
"version": "0.0.150",
|
||||
"version": "0.0.151",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@scrypted/amcrest",
|
||||
"version": "0.0.150",
|
||||
"version": "0.0.151",
|
||||
"license": "Apache",
|
||||
"dependencies": {
|
||||
"@scrypted/common": "file:../../common",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@scrypted/amcrest",
|
||||
"version": "0.0.150",
|
||||
"version": "0.0.151",
|
||||
"description": "Amcrest Plugin for Scrypted",
|
||||
"author": "Scrypted",
|
||||
"license": "Apache",
|
||||
|
||||
@@ -134,7 +134,7 @@ export interface AmcrestEventData {
|
||||
export enum AmcrestEvent {
|
||||
MotionStart = "Code=VideoMotion;action=Start",
|
||||
MotionStop = "Code=VideoMotion;action=Stop",
|
||||
MotionInfo = "Code=VideoMotionInfo;action=State",
|
||||
MotionInfo = "Code=VideoMotionInfo;action=State",
|
||||
AudioStart = "Code=AudioMutation;action=Start",
|
||||
AudioStop = "Code=AudioMutation;action=Stop",
|
||||
TalkInvite = "Code=_DoTalkAction_;action=Invite",
|
||||
@@ -263,6 +263,8 @@ export class AmcrestCameraClient {
|
||||
// make content type parsable as content disposition filename
|
||||
const cd = contentType.parse(ct);
|
||||
let { boundary } = cd.parameters;
|
||||
// amcrest may send "--myboundary" or "-- myboundary" (with a space)
|
||||
const altBoundary = `-- ${boundary}`;
|
||||
boundary = `--${boundary}`;
|
||||
const boundaryEnd = `${boundary}--`;
|
||||
|
||||
@@ -286,7 +288,7 @@ export class AmcrestCameraClient {
|
||||
this.console.log('ignoring dahua http body', body);
|
||||
continue;
|
||||
}
|
||||
if (ignore !== boundary) {
|
||||
if (ignore !== boundary && ignore !== altBoundary) {
|
||||
this.console.error('expected boundary but found', ignore);
|
||||
this.console.error(response.headers);
|
||||
throw new Error('expected boundary');
|
||||
|
||||
510
plugins/bticino/package-lock.json
generated
510
plugins/bticino/package-lock.json
generated
@@ -1,29 +1,26 @@
|
||||
{
|
||||
"name": "@scrypted/bticino",
|
||||
"version": "0.0.15",
|
||||
"version": "0.0.16",
|
||||
"lockfileVersion": 2,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@scrypted/bticino",
|
||||
"version": "0.0.15",
|
||||
"version": "0.0.16",
|
||||
"dependencies": {
|
||||
"@slyoldfox/sip": "^0.0.6-1",
|
||||
"sdp": "^3.0.3",
|
||||
"stun": "^2.1.0",
|
||||
"uuid": "^8.3.2"
|
||||
"stun": "^2.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@scrypted/common": "file:../../common",
|
||||
"@scrypted/sdk": "file:../../sdk",
|
||||
"@types/node": "^16.9.6",
|
||||
"@types/uuid": "^8.3.4",
|
||||
"cross-env": "^7.0.3",
|
||||
"ts-node": "^10.9.1"
|
||||
}
|
||||
},
|
||||
"../../common": {
|
||||
"name": "@scrypted/common",
|
||||
"version": "1.0.1",
|
||||
"dev": true,
|
||||
"license": "ISC",
|
||||
@@ -39,8 +36,7 @@
|
||||
}
|
||||
},
|
||||
"../../sdk": {
|
||||
"name": "@scrypted/sdk",
|
||||
"version": "0.3.14",
|
||||
"version": "0.3.29",
|
||||
"dev": true,
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
@@ -89,18 +85,18 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@jridgewell/resolve-uri": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz",
|
||||
"integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==",
|
||||
"version": "3.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
|
||||
"integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
|
||||
"dev": true,
|
||||
"engines": {
|
||||
"node": ">=6.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@jridgewell/sourcemap-codec": {
|
||||
"version": "1.4.14",
|
||||
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz",
|
||||
"integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==",
|
||||
"version": "1.4.15",
|
||||
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
|
||||
"integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==",
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/@jridgewell/trace-mapping": {
|
||||
@@ -130,9 +126,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@tsconfig/node10": {
|
||||
"version": "1.0.9",
|
||||
"resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.9.tgz",
|
||||
"integrity": "sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==",
|
||||
"version": "1.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz",
|
||||
"integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==",
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/@tsconfig/node12": {
|
||||
@@ -148,27 +144,21 @@
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/@tsconfig/node16": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.3.tgz",
|
||||
"integrity": "sha512-yOlFc+7UtL/89t2ZhjPvvB/DeAr3r+Dq58IgzsFkOAvVC6NMJXmCGjbptdXdR9qsX7pKcTL+s87FtYREi2dEEQ==",
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz",
|
||||
"integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==",
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/@types/node": {
|
||||
"version": "16.18.16",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.16.tgz",
|
||||
"integrity": "sha512-ZOzvDRWp8dCVBmgnkIqYCArgdFOO9YzocZp8Ra25N/RStKiWvMOXHMz+GjSeVNe5TstaTmTWPucGJkDw0XXJWA==",
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/@types/uuid": {
|
||||
"version": "8.3.4",
|
||||
"resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-8.3.4.tgz",
|
||||
"integrity": "sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw==",
|
||||
"version": "16.18.96",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.96.tgz",
|
||||
"integrity": "sha512-84iSqGXoO+Ha16j8pRZ/L90vDMKX04QTYMTfYeE1WrjWaZXuchBehGUZEpNgx7JnmlrIHdnABmpjrQjhCnNldQ==",
|
||||
"dev": true
|
||||
},
|
||||
"node_modules/acorn": {
|
||||
"version": "8.8.2",
|
||||
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz",
|
||||
"integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==",
|
||||
"version": "8.11.3",
|
||||
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz",
|
||||
"integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==",
|
||||
"dev": true,
|
||||
"bin": {
|
||||
"acorn": "bin/acorn"
|
||||
@@ -178,9 +168,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/acorn-walk": {
|
||||
"version": "8.2.0",
|
||||
"resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz",
|
||||
"integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==",
|
||||
"version": "8.3.2",
|
||||
"resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.2.tgz",
|
||||
"integrity": "sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==",
|
||||
"dev": true,
|
||||
"engines": {
|
||||
"node": ">=0.4.0"
|
||||
@@ -229,12 +219,18 @@
|
||||
}
|
||||
},
|
||||
"node_modules/call-bind": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz",
|
||||
"integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==",
|
||||
"version": "1.0.7",
|
||||
"resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz",
|
||||
"integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==",
|
||||
"dependencies": {
|
||||
"function-bind": "^1.1.1",
|
||||
"get-intrinsic": "^1.0.2"
|
||||
"es-define-property": "^1.0.0",
|
||||
"es-errors": "^1.3.0",
|
||||
"function-bind": "^1.1.2",
|
||||
"get-intrinsic": "^1.2.4",
|
||||
"set-function-length": "^1.2.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
@@ -365,6 +361,22 @@
|
||||
"node": ">=0.10"
|
||||
}
|
||||
},
|
||||
"node_modules/define-data-property": {
|
||||
"version": "1.1.4",
|
||||
"resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
|
||||
"integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
|
||||
"dependencies": {
|
||||
"es-define-property": "^1.0.0",
|
||||
"es-errors": "^1.3.0",
|
||||
"gopd": "^1.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/diff": {
|
||||
"version": "4.0.2",
|
||||
"resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz",
|
||||
@@ -382,6 +394,25 @@
|
||||
"is-arrayish": "^0.2.1"
|
||||
}
|
||||
},
|
||||
"node_modules/es-define-property": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz",
|
||||
"integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==",
|
||||
"dependencies": {
|
||||
"get-intrinsic": "^1.2.4"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/es-errors": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
|
||||
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/filter-obj": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/filter-obj/-/filter-obj-1.1.0.tgz",
|
||||
@@ -402,9 +433,12 @@
|
||||
}
|
||||
},
|
||||
"node_modules/function-bind": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
|
||||
"integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
|
||||
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/generate-function": {
|
||||
"version": "2.3.1",
|
||||
@@ -415,13 +449,29 @@
|
||||
}
|
||||
},
|
||||
"node_modules/get-intrinsic": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz",
|
||||
"integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==",
|
||||
"version": "1.2.4",
|
||||
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
|
||||
"integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
|
||||
"dependencies": {
|
||||
"function-bind": "^1.1.1",
|
||||
"has": "^1.0.3",
|
||||
"has-symbols": "^1.0.3"
|
||||
"es-errors": "^1.3.0",
|
||||
"function-bind": "^1.1.2",
|
||||
"has-proto": "^1.0.1",
|
||||
"has-symbols": "^1.0.3",
|
||||
"hasown": "^2.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/gopd": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
|
||||
"integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
|
||||
"dependencies": {
|
||||
"get-intrinsic": "^1.1.3"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
@@ -432,15 +482,26 @@
|
||||
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
|
||||
"integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="
|
||||
},
|
||||
"node_modules/has": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
|
||||
"integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
|
||||
"node_modules/has-property-descriptors": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
|
||||
"integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
|
||||
"dependencies": {
|
||||
"function-bind": "^1.1.1"
|
||||
"es-define-property": "^1.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/has-proto": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz",
|
||||
"integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==",
|
||||
"engines": {
|
||||
"node": ">= 0.4.0"
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/has-symbols": {
|
||||
@@ -454,6 +515,17 @@
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/hasown": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
|
||||
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
|
||||
"dependencies": {
|
||||
"function-bind": "^1.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/hosted-git-info": {
|
||||
"version": "2.8.9",
|
||||
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz",
|
||||
@@ -468,9 +540,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/ip": {
|
||||
"version": "1.1.8",
|
||||
"resolved": "https://registry.npmjs.org/ip/-/ip-1.1.8.tgz",
|
||||
"integrity": "sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg=="
|
||||
"version": "1.1.9",
|
||||
"resolved": "https://registry.npmjs.org/ip/-/ip-1.1.9.tgz",
|
||||
"integrity": "sha512-cyRxvOEpNHNtchU3Ln9KC/auJgup87llfQpQ+t5ghoC/UhL16SWzbueiCsdTnWmqAWl7LadfuwhlqmtOaqMHdQ=="
|
||||
},
|
||||
"node_modules/ip2buf": {
|
||||
"version": "2.0.0",
|
||||
@@ -486,11 +558,11 @@
|
||||
"integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg=="
|
||||
},
|
||||
"node_modules/is-core-module": {
|
||||
"version": "2.11.0",
|
||||
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.11.0.tgz",
|
||||
"integrity": "sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==",
|
||||
"version": "2.13.1",
|
||||
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz",
|
||||
"integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==",
|
||||
"dependencies": {
|
||||
"has": "^1.0.3"
|
||||
"hasown": "^2.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
@@ -671,9 +743,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/object-inspect": {
|
||||
"version": "1.12.3",
|
||||
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz",
|
||||
"integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g==",
|
||||
"version": "1.13.1",
|
||||
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz",
|
||||
"integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==",
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
@@ -789,11 +861,11 @@
|
||||
"integrity": "sha512-IgjKyaUSjsROSO8/D49Ab7hP8mJgTYcqApOqdPhLoPxAplXmkp+zRvsrSQjFn5by0rhm4VH0GAUELIPpx7B1yg=="
|
||||
},
|
||||
"node_modules/qs": {
|
||||
"version": "6.11.1",
|
||||
"resolved": "https://registry.npmjs.org/qs/-/qs-6.11.1.tgz",
|
||||
"integrity": "sha512-0wsrzgTz/kAVIeuxSjnpGC56rzYtr6JT/2BwEvMaPhFIoYa1aGO8LbzuU1R0uUYQkLpWBTOj0l/CLAJB64J6nQ==",
|
||||
"version": "6.12.1",
|
||||
"resolved": "https://registry.npmjs.org/qs/-/qs-6.12.1.tgz",
|
||||
"integrity": "sha512-zWmv4RSuB9r2mYQw3zxQuHWeU+42aKi1wWig/j4ele4ygELZ7PEO6MM7rim9oAQH2A5MWfsAVf/jPvTPgCbvUQ==",
|
||||
"dependencies": {
|
||||
"side-channel": "^1.0.4"
|
||||
"side-channel": "^1.0.6"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=0.6"
|
||||
@@ -865,11 +937,11 @@
|
||||
}
|
||||
},
|
||||
"node_modules/resolve": {
|
||||
"version": "1.22.1",
|
||||
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz",
|
||||
"integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==",
|
||||
"version": "1.22.8",
|
||||
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
|
||||
"integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
|
||||
"dependencies": {
|
||||
"is-core-module": "^2.9.0",
|
||||
"is-core-module": "^2.13.0",
|
||||
"path-parse": "^1.0.7",
|
||||
"supports-preserve-symlinks-flag": "^1.0.0"
|
||||
},
|
||||
@@ -912,6 +984,22 @@
|
||||
"semver": "bin/semver"
|
||||
}
|
||||
},
|
||||
"node_modules/set-function-length": {
|
||||
"version": "1.2.2",
|
||||
"resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
|
||||
"integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==",
|
||||
"dependencies": {
|
||||
"define-data-property": "^1.1.4",
|
||||
"es-errors": "^1.3.0",
|
||||
"function-bind": "^1.1.2",
|
||||
"get-intrinsic": "^1.2.4",
|
||||
"gopd": "^1.0.1",
|
||||
"has-property-descriptors": "^1.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/shebang-command": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
|
||||
@@ -934,13 +1022,17 @@
|
||||
}
|
||||
},
|
||||
"node_modules/side-channel": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz",
|
||||
"integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==",
|
||||
"version": "1.0.6",
|
||||
"resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz",
|
||||
"integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==",
|
||||
"dependencies": {
|
||||
"call-bind": "^1.0.0",
|
||||
"get-intrinsic": "^1.0.2",
|
||||
"object-inspect": "^1.9.0"
|
||||
"call-bind": "^1.0.7",
|
||||
"es-errors": "^1.3.0",
|
||||
"get-intrinsic": "^1.2.4",
|
||||
"object-inspect": "^1.13.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
@@ -961,9 +1053,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/spdx-exceptions": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz",
|
||||
"integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A=="
|
||||
"version": "2.5.0",
|
||||
"resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz",
|
||||
"integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w=="
|
||||
},
|
||||
"node_modules/spdx-expression-parse": {
|
||||
"version": "3.0.1",
|
||||
@@ -975,9 +1067,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/spdx-license-ids": {
|
||||
"version": "3.0.13",
|
||||
"resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.13.tgz",
|
||||
"integrity": "sha512-XkD+zwiqXHikFZm4AX/7JSCXA98U5Db4AFd5XUg/+9UNtnH75+Z9KxtpYiJZx36mUDVOwH83pl7yvCer6ewM3w=="
|
||||
"version": "3.0.17",
|
||||
"resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.17.tgz",
|
||||
"integrity": "sha512-sh8PWc/ftMqAAdFiBu6Fy6JUOYjqDJBJvIhpfDMyHrr0Rbp5liZqd4TjtQ/RgfLjKFZb+LMx5hpml5qOWy0qvg=="
|
||||
},
|
||||
"node_modules/split-on-first": {
|
||||
"version": "1.1.0",
|
||||
@@ -1054,9 +1146,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/ts-node": {
|
||||
"version": "10.9.1",
|
||||
"resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz",
|
||||
"integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==",
|
||||
"version": "10.9.2",
|
||||
"resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz",
|
||||
"integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==",
|
||||
"dev": true,
|
||||
"dependencies": {
|
||||
"@cspotcode/source-map-support": "^0.8.0",
|
||||
@@ -1105,9 +1197,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/typescript": {
|
||||
"version": "5.0.2",
|
||||
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.2.tgz",
|
||||
"integrity": "sha512-wVORMBGO/FAs/++blGNeAVdbNKtIh1rbBL2EyQ1+J9lClJ93KiiKe8PmFIVdXhHcyv44SL9oglmfeSsndo0jRw==",
|
||||
"version": "5.4.5",
|
||||
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz",
|
||||
"integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==",
|
||||
"dev": true,
|
||||
"peer": true,
|
||||
"bin": {
|
||||
@@ -1115,7 +1207,7 @@
|
||||
"tsserver": "bin/tsserver"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12.20"
|
||||
"node": ">=14.17"
|
||||
}
|
||||
},
|
||||
"node_modules/universalify": {
|
||||
@@ -1126,14 +1218,6 @@
|
||||
"node": ">= 4.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/uuid": {
|
||||
"version": "8.3.2",
|
||||
"resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz",
|
||||
"integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==",
|
||||
"bin": {
|
||||
"uuid": "dist/bin/uuid"
|
||||
}
|
||||
},
|
||||
"node_modules/v8-compile-cache-lib": {
|
||||
"version": "3.0.1",
|
||||
"resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz",
|
||||
@@ -1193,15 +1277,15 @@
|
||||
}
|
||||
},
|
||||
"@jridgewell/resolve-uri": {
|
||||
"version": "3.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz",
|
||||
"integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==",
|
||||
"version": "3.1.2",
|
||||
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
|
||||
"integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
|
||||
"dev": true
|
||||
},
|
||||
"@jridgewell/sourcemap-codec": {
|
||||
"version": "1.4.14",
|
||||
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz",
|
||||
"integrity": "sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==",
|
||||
"version": "1.4.15",
|
||||
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
|
||||
"integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==",
|
||||
"dev": true
|
||||
},
|
||||
"@jridgewell/trace-mapping": {
|
||||
@@ -1255,9 +1339,9 @@
|
||||
"integrity": "sha512-PJBIAKS3aMsFTHeQLfAtVpZOduAqGNZZAEH6Kb15htGUcSJWHZ9r2LAjxm3fD4yWT9plYlO0CthcEVnlrrwQLA=="
|
||||
},
|
||||
"@tsconfig/node10": {
|
||||
"version": "1.0.9",
|
||||
"resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.9.tgz",
|
||||
"integrity": "sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==",
|
||||
"version": "1.0.11",
|
||||
"resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz",
|
||||
"integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==",
|
||||
"dev": true
|
||||
},
|
||||
"@tsconfig/node12": {
|
||||
@@ -1273,33 +1357,27 @@
|
||||
"dev": true
|
||||
},
|
||||
"@tsconfig/node16": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.3.tgz",
|
||||
"integrity": "sha512-yOlFc+7UtL/89t2ZhjPvvB/DeAr3r+Dq58IgzsFkOAvVC6NMJXmCGjbptdXdR9qsX7pKcTL+s87FtYREi2dEEQ==",
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/@tsconfig/node16/-/node16-1.0.4.tgz",
|
||||
"integrity": "sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==",
|
||||
"dev": true
|
||||
},
|
||||
"@types/node": {
|
||||
"version": "16.18.16",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.16.tgz",
|
||||
"integrity": "sha512-ZOzvDRWp8dCVBmgnkIqYCArgdFOO9YzocZp8Ra25N/RStKiWvMOXHMz+GjSeVNe5TstaTmTWPucGJkDw0XXJWA==",
|
||||
"dev": true
|
||||
},
|
||||
"@types/uuid": {
|
||||
"version": "8.3.4",
|
||||
"resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-8.3.4.tgz",
|
||||
"integrity": "sha512-c/I8ZRb51j+pYGAu5CrFMRxqZ2ke4y2grEBO5AUjgSkSk+qT2Ea+OdWElz/OiMf5MNpn2b17kuVBwZLQJXzihw==",
|
||||
"version": "16.18.96",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-16.18.96.tgz",
|
||||
"integrity": "sha512-84iSqGXoO+Ha16j8pRZ/L90vDMKX04QTYMTfYeE1WrjWaZXuchBehGUZEpNgx7JnmlrIHdnABmpjrQjhCnNldQ==",
|
||||
"dev": true
|
||||
},
|
||||
"acorn": {
|
||||
"version": "8.8.2",
|
||||
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz",
|
||||
"integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==",
|
||||
"version": "8.11.3",
|
||||
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz",
|
||||
"integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==",
|
||||
"dev": true
|
||||
},
|
||||
"acorn-walk": {
|
||||
"version": "8.2.0",
|
||||
"resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz",
|
||||
"integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==",
|
||||
"version": "8.3.2",
|
||||
"resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.2.tgz",
|
||||
"integrity": "sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==",
|
||||
"dev": true
|
||||
},
|
||||
"arg": {
|
||||
@@ -1336,12 +1414,15 @@
|
||||
}
|
||||
},
|
||||
"call-bind": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz",
|
||||
"integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==",
|
||||
"version": "1.0.7",
|
||||
"resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz",
|
||||
"integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==",
|
||||
"requires": {
|
||||
"function-bind": "^1.1.1",
|
||||
"get-intrinsic": "^1.0.2"
|
||||
"es-define-property": "^1.0.0",
|
||||
"es-errors": "^1.3.0",
|
||||
"function-bind": "^1.1.2",
|
||||
"get-intrinsic": "^1.2.4",
|
||||
"set-function-length": "^1.2.1"
|
||||
}
|
||||
},
|
||||
"camelcase": {
|
||||
@@ -1427,6 +1508,16 @@
|
||||
"resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.2.tgz",
|
||||
"integrity": "sha512-FqUYQ+8o158GyGTrMFJms9qh3CqTKvAqgqsTnkLI8sKu0028orqBhxNMFkFen0zGyg6epACD32pjVk58ngIErQ=="
|
||||
},
|
||||
"define-data-property": {
|
||||
"version": "1.1.4",
|
||||
"resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
|
||||
"integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
|
||||
"requires": {
|
||||
"es-define-property": "^1.0.0",
|
||||
"es-errors": "^1.3.0",
|
||||
"gopd": "^1.0.1"
|
||||
}
|
||||
},
|
||||
"diff": {
|
||||
"version": "4.0.2",
|
||||
"resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz",
|
||||
@@ -1441,6 +1532,19 @@
|
||||
"is-arrayish": "^0.2.1"
|
||||
}
|
||||
},
|
||||
"es-define-property": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz",
|
||||
"integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==",
|
||||
"requires": {
|
||||
"get-intrinsic": "^1.2.4"
|
||||
}
|
||||
},
|
||||
"es-errors": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
|
||||
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="
|
||||
},
|
||||
"filter-obj": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/filter-obj/-/filter-obj-1.1.0.tgz",
|
||||
@@ -1455,9 +1559,9 @@
|
||||
}
|
||||
},
|
||||
"function-bind": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz",
|
||||
"integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A=="
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
|
||||
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="
|
||||
},
|
||||
"generate-function": {
|
||||
"version": "2.3.1",
|
||||
@@ -1468,13 +1572,23 @@
|
||||
}
|
||||
},
|
||||
"get-intrinsic": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.0.tgz",
|
||||
"integrity": "sha512-L049y6nFOuom5wGyRc3/gdTLO94dySVKRACj1RmJZBQXlbTMhtNIgkWkUHq+jYmZvKf14EW1EoJnnjbmoHij0Q==",
|
||||
"version": "1.2.4",
|
||||
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
|
||||
"integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
|
||||
"requires": {
|
||||
"function-bind": "^1.1.1",
|
||||
"has": "^1.0.3",
|
||||
"has-symbols": "^1.0.3"
|
||||
"es-errors": "^1.3.0",
|
||||
"function-bind": "^1.1.2",
|
||||
"has-proto": "^1.0.1",
|
||||
"has-symbols": "^1.0.3",
|
||||
"hasown": "^2.0.0"
|
||||
}
|
||||
},
|
||||
"gopd": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
|
||||
"integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
|
||||
"requires": {
|
||||
"get-intrinsic": "^1.1.3"
|
||||
}
|
||||
},
|
||||
"graceful-fs": {
|
||||
@@ -1482,19 +1596,32 @@
|
||||
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
|
||||
"integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="
|
||||
},
|
||||
"has": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
|
||||
"integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
|
||||
"has-property-descriptors": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
|
||||
"integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
|
||||
"requires": {
|
||||
"function-bind": "^1.1.1"
|
||||
"es-define-property": "^1.0.0"
|
||||
}
|
||||
},
|
||||
"has-proto": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz",
|
||||
"integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q=="
|
||||
},
|
||||
"has-symbols": {
|
||||
"version": "1.0.3",
|
||||
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
|
||||
"integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A=="
|
||||
},
|
||||
"hasown": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
|
||||
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
|
||||
"requires": {
|
||||
"function-bind": "^1.1.2"
|
||||
}
|
||||
},
|
||||
"hosted-git-info": {
|
||||
"version": "2.8.9",
|
||||
"resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz",
|
||||
@@ -1506,9 +1633,9 @@
|
||||
"integrity": "sha512-BYqTHXTGUIvg7t1r4sJNKcbDZkL92nkXA8YtRpbjFHRHGDL/NtUeiBJMeE60kIFN/Mg8ESaWQvftaYMGJzQZCQ=="
|
||||
},
|
||||
"ip": {
|
||||
"version": "1.1.8",
|
||||
"resolved": "https://registry.npmjs.org/ip/-/ip-1.1.8.tgz",
|
||||
"integrity": "sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg=="
|
||||
"version": "1.1.9",
|
||||
"resolved": "https://registry.npmjs.org/ip/-/ip-1.1.9.tgz",
|
||||
"integrity": "sha512-cyRxvOEpNHNtchU3Ln9KC/auJgup87llfQpQ+t5ghoC/UhL16SWzbueiCsdTnWmqAWl7LadfuwhlqmtOaqMHdQ=="
|
||||
},
|
||||
"ip2buf": {
|
||||
"version": "2.0.0",
|
||||
@@ -1521,11 +1648,11 @@
|
||||
"integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg=="
|
||||
},
|
||||
"is-core-module": {
|
||||
"version": "2.11.0",
|
||||
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.11.0.tgz",
|
||||
"integrity": "sha512-RRjxlvLDkD1YJwDbroBHMb+cukurkDWNyHx7D3oNB5x9rb5ogcksMC5wHCadcXoo67gVr/+3GFySh3134zi6rw==",
|
||||
"version": "2.13.1",
|
||||
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.13.1.tgz",
|
||||
"integrity": "sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==",
|
||||
"requires": {
|
||||
"has": "^1.0.3"
|
||||
"hasown": "^2.0.0"
|
||||
}
|
||||
},
|
||||
"is-plain-obj": {
|
||||
@@ -1669,9 +1796,9 @@
|
||||
"integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A=="
|
||||
},
|
||||
"object-inspect": {
|
||||
"version": "1.12.3",
|
||||
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.3.tgz",
|
||||
"integrity": "sha512-geUvdk7c+eizMNUDkRpW1wJwgfOiOeHbxBR/hLXK1aT6zmVSO0jsQcs7fj6MGw89jC/cjGfLcNOrtMYtGqm81g=="
|
||||
"version": "1.13.1",
|
||||
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz",
|
||||
"integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ=="
|
||||
},
|
||||
"p-limit": {
|
||||
"version": "1.3.0",
|
||||
@@ -1760,11 +1887,11 @@
|
||||
"integrity": "sha512-IgjKyaUSjsROSO8/D49Ab7hP8mJgTYcqApOqdPhLoPxAplXmkp+zRvsrSQjFn5by0rhm4VH0GAUELIPpx7B1yg=="
|
||||
},
|
||||
"qs": {
|
||||
"version": "6.11.1",
|
||||
"resolved": "https://registry.npmjs.org/qs/-/qs-6.11.1.tgz",
|
||||
"integrity": "sha512-0wsrzgTz/kAVIeuxSjnpGC56rzYtr6JT/2BwEvMaPhFIoYa1aGO8LbzuU1R0uUYQkLpWBTOj0l/CLAJB64J6nQ==",
|
||||
"version": "6.12.1",
|
||||
"resolved": "https://registry.npmjs.org/qs/-/qs-6.12.1.tgz",
|
||||
"integrity": "sha512-zWmv4RSuB9r2mYQw3zxQuHWeU+42aKi1wWig/j4ele4ygELZ7PEO6MM7rim9oAQH2A5MWfsAVf/jPvTPgCbvUQ==",
|
||||
"requires": {
|
||||
"side-channel": "^1.0.4"
|
||||
"side-channel": "^1.0.6"
|
||||
}
|
||||
},
|
||||
"query-string": {
|
||||
@@ -1812,11 +1939,11 @@
|
||||
}
|
||||
},
|
||||
"resolve": {
|
||||
"version": "1.22.1",
|
||||
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.1.tgz",
|
||||
"integrity": "sha512-nBpuuYuY5jFsli/JIs1oldw6fOQCBioohqWZg/2hiaOybXOft4lonv85uDOKXdf8rhyK159cxU5cDcK/NKk8zw==",
|
||||
"version": "1.22.8",
|
||||
"resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.8.tgz",
|
||||
"integrity": "sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==",
|
||||
"requires": {
|
||||
"is-core-module": "^2.9.0",
|
||||
"is-core-module": "^2.13.0",
|
||||
"path-parse": "^1.0.7",
|
||||
"supports-preserve-symlinks-flag": "^1.0.0"
|
||||
}
|
||||
@@ -1836,6 +1963,19 @@
|
||||
"resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz",
|
||||
"integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g=="
|
||||
},
|
||||
"set-function-length": {
|
||||
"version": "1.2.2",
|
||||
"resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
|
||||
"integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==",
|
||||
"requires": {
|
||||
"define-data-property": "^1.1.4",
|
||||
"es-errors": "^1.3.0",
|
||||
"function-bind": "^1.1.2",
|
||||
"get-intrinsic": "^1.2.4",
|
||||
"gopd": "^1.0.1",
|
||||
"has-property-descriptors": "^1.0.2"
|
||||
}
|
||||
},
|
||||
"shebang-command": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
|
||||
@@ -1852,13 +1992,14 @@
|
||||
"dev": true
|
||||
},
|
||||
"side-channel": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz",
|
||||
"integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==",
|
||||
"version": "1.0.6",
|
||||
"resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz",
|
||||
"integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==",
|
||||
"requires": {
|
||||
"call-bind": "^1.0.0",
|
||||
"get-intrinsic": "^1.0.2",
|
||||
"object-inspect": "^1.9.0"
|
||||
"call-bind": "^1.0.7",
|
||||
"es-errors": "^1.3.0",
|
||||
"get-intrinsic": "^1.2.4",
|
||||
"object-inspect": "^1.13.1"
|
||||
}
|
||||
},
|
||||
"signal-exit": {
|
||||
@@ -1876,9 +2017,9 @@
|
||||
}
|
||||
},
|
||||
"spdx-exceptions": {
|
||||
"version": "2.3.0",
|
||||
"resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.3.0.tgz",
|
||||
"integrity": "sha512-/tTrYOC7PPI1nUAgx34hUpqXuyJG+DTHJTnIULG4rDygi4xu/tfgmq1e1cIRwRzwZgo4NLySi+ricLkZkw4i5A=="
|
||||
"version": "2.5.0",
|
||||
"resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz",
|
||||
"integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w=="
|
||||
},
|
||||
"spdx-expression-parse": {
|
||||
"version": "3.0.1",
|
||||
@@ -1890,9 +2031,9 @@
|
||||
}
|
||||
},
|
||||
"spdx-license-ids": {
|
||||
"version": "3.0.13",
|
||||
"resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.13.tgz",
|
||||
"integrity": "sha512-XkD+zwiqXHikFZm4AX/7JSCXA98U5Db4AFd5XUg/+9UNtnH75+Z9KxtpYiJZx36mUDVOwH83pl7yvCer6ewM3w=="
|
||||
"version": "3.0.17",
|
||||
"resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.17.tgz",
|
||||
"integrity": "sha512-sh8PWc/ftMqAAdFiBu6Fy6JUOYjqDJBJvIhpfDMyHrr0Rbp5liZqd4TjtQ/RgfLjKFZb+LMx5hpml5qOWy0qvg=="
|
||||
},
|
||||
"split-on-first": {
|
||||
"version": "1.1.0",
|
||||
@@ -1942,9 +2083,9 @@
|
||||
"integrity": "sha512-MTBWv3jhVjTU7XR3IQHllbiJs8sc75a80OEhB6or/q7pLTWgQ0bMGQXXYQSrSuXe6WiKWDZ5txXY5P59a/coVA=="
|
||||
},
|
||||
"ts-node": {
|
||||
"version": "10.9.1",
|
||||
"resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.1.tgz",
|
||||
"integrity": "sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==",
|
||||
"version": "10.9.2",
|
||||
"resolved": "https://registry.npmjs.org/ts-node/-/ts-node-10.9.2.tgz",
|
||||
"integrity": "sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==",
|
||||
"dev": true,
|
||||
"requires": {
|
||||
"@cspotcode/source-map-support": "^0.8.0",
|
||||
@@ -1968,9 +2109,9 @@
|
||||
"integrity": "sha512-8yyRd1ZdNp+AQLGqi3lTaA2k81JjlIZOyFQEsi7GQWBgirnQOxjqVtDEbYHM2Z4yFdJ5AQw0fxBLLnDCl6RXoQ=="
|
||||
},
|
||||
"typescript": {
|
||||
"version": "5.0.2",
|
||||
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.0.2.tgz",
|
||||
"integrity": "sha512-wVORMBGO/FAs/++blGNeAVdbNKtIh1rbBL2EyQ1+J9lClJ93KiiKe8PmFIVdXhHcyv44SL9oglmfeSsndo0jRw==",
|
||||
"version": "5.4.5",
|
||||
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.5.tgz",
|
||||
"integrity": "sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==",
|
||||
"dev": true,
|
||||
"peer": true
|
||||
},
|
||||
@@ -1979,11 +2120,6 @@
|
||||
"resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
|
||||
"integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="
|
||||
},
|
||||
"uuid": {
|
||||
"version": "8.3.2",
|
||||
"resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz",
|
||||
"integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg=="
|
||||
},
|
||||
"v8-compile-cache-lib": {
|
||||
"version": "3.0.1",
|
||||
"resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz",
|
||||
|
||||
@@ -1,6 +1,6 @@
{
  "name": "@scrypted/bticino",
- "version": "0.0.15",
+ "version": "0.0.16",
  "scripts": {
    "scrypted-setup-project": "scrypted-setup-project",
    "prescrypted-setup-project": "scrypted-package-json",
@@ -34,14 +34,12 @@
  "dependencies": {
    "@slyoldfox/sip": "^0.0.6-1",
    "sdp": "^3.0.3",
-   "stun": "^2.1.0",
-   "uuid": "^8.3.2"
+   "stun": "^2.1.0"
  },
  "devDependencies": {
    "@scrypted/common": "file:../../common",
    "@scrypted/sdk": "file:../../sdk",
    "@types/node": "^16.9.6",
-   "@types/uuid": "^8.3.4",
    "cross-env": "^7.0.3",
    "ts-node": "^10.9.1"
  }
@@ -1,22 +1,22 @@
-import { closeQuiet, createBindUdp, createBindZero, listenZeroSingleClient } from '@scrypted/common/src/listen-cluster';
+import { createBindUdp, listenZeroSingleClient } from '@scrypted/common/src/listen-cluster';
import { sleep } from '@scrypted/common/src/sleep';
import { RtspServer } from '@scrypted/common/src/rtsp-server';
import { addTrackControls, parseSdp } from '@scrypted/common/src/sdp-utils';
-import sdk, { BinarySensor, Camera, DeviceProvider, FFmpegInput, HttpRequest, HttpRequestHandler, HttpResponse, Intercom, MediaObject, MediaStreamUrl, MotionSensor, PictureOptions, Reboot, ResponseMediaStreamOptions, ScryptedDeviceBase, ScryptedMimeTypes, Setting, Settings, SettingValue, VideoCamera, VideoClip, VideoClipOptions, VideoClips } from '@scrypted/sdk';
+import sdk, { BinarySensor, Camera, DeviceProvider, FFmpegInput, HttpRequest, HttpRequestHandler, HttpResponse, Intercom, MediaObject, MediaStreamUrl, MotionSensor, PictureOptions, Reboot, ResponseMediaStreamOptions, ScryptedDeviceBase, ScryptedInterface, ScryptedMimeTypes, Setting, Settings, SettingValue, VideoCamera, VideoClip, VideoClipOptions, VideoClips } from '@scrypted/sdk';
import { SipCallSession } from '../../sip/src/sip-call-session';
import { RtpDescription, getPayloadType, getSequenceNumber, isRtpMessagePayloadType, isStunMessage } from '../../sip/src/rtp-utils';
import { VoicemailHandler } from './bticino-voicemailHandler';
import { CompositeSipMessageHandler } from '../../sip/src/compositeSipMessageHandler';
import { SipHelper } from './sip-helper';
import child_process, { ChildProcess } from 'child_process';
import dgram from 'dgram';
import { BticinoStorageSettings } from './storage-settings';
import { BticinoSipPlugin } from './main';
import { BticinoSipLock } from './bticino-lock';
-import { ffmpegLogInitialOutput, safeKillFFmpeg, safePrintFFmpegArguments } from '@scrypted/common/src/media-helpers';
+import { safePrintFFmpegArguments } from '@scrypted/common/src/media-helpers';
import { PersistentSipManager } from './persistent-sip-manager';
import { InviteHandler } from './bticino-inviteHandler';
import { SipOptions, SipRequest } from '../../sip/src/sip-manager';
+import { startRtpForwarderProcess } from '../../webrtc/src/rtp-forwarders';
import fs from "fs"
import url from "url"
import path from 'path';
@@ -37,8 +37,7 @@ export class BticinoSipCamera extends ScryptedDeviceBase implements MotionSensor

    private session: SipCallSession
    private remoteRtpDescription: Promise<RtpDescription>
-   private audioOutForwarder: dgram.Socket
-   private audioOutProcess: ChildProcess
+   private forwarder
    private refreshTimeout: NodeJS.Timeout
    public requestHandlers: CompositeSipMessageHandler = new CompositeSipMessageHandler()
    public incomingCallRequest : SipRequest
@@ -276,21 +275,27 @@ export class BticinoSipCamera extends ScryptedDeviceBase implements MotionSensor
    }

    async takePicture(option?: PictureOptions): Promise<MediaObject> {
-       const thumbnailCacheTime : number = parseInt( this.storage?.getItem('thumbnailCacheTime') ) * 1000 || 300000
-       const now = new Date().getTime()
-       if( !this.lastImageRefresh || this.lastImageRefresh + thumbnailCacheTime < now ) {
-           // get a proxy object to make sure we pass prebuffer when already watching a stream
-           let cam : VideoCamera = sdk.systemManager.getDeviceById<VideoCamera>(this.id)
-           let vs : MediaObject = await cam.getVideoStream()
-           let buf : Buffer = await mediaManager.convertMediaObjectToBuffer(vs, 'image/jpeg');
-           this.cachedImage = buf
-           this.lastImageRefresh = new Date().getTime()
-           this.console.log(`Camera picture updated and cached: ${this.lastImageRefresh} + cache time: ${thumbnailCacheTime} < ${now}`)
+       let rebroadcastEnabled = this.interfaces?.includes( "mixin:@scrypted/prebuffer-mixin")
+       if( rebroadcastEnabled ) {
+           const thumbnailCacheTime : number = parseInt( this.storage?.getItem('thumbnailCacheTime') ) * 1000 || 300000
+           const now = new Date().getTime()
+           if( !this.lastImageRefresh || this.lastImageRefresh + thumbnailCacheTime < now ) {
+               // get a proxy object to make sure we pass prebuffer when already watching a stream
+               let cam : VideoCamera = sdk.systemManager.getDeviceById<VideoCamera>(this.id)
+               let vs : MediaObject = await cam.getVideoStream()
+               let buf : Buffer = await mediaManager.convertMediaObjectToBuffer(vs, 'image/jpeg');
+               this.cachedImage = buf
+               this.lastImageRefresh = new Date().getTime()
+               this.console.log(`Camera picture updated and cached: ${this.lastImageRefresh} + cache time: ${thumbnailCacheTime} < ${now}`)
+           } else {
+               this.console.log(`Not refreshing camera picture: ${this.lastImageRefresh} + cache time: ${thumbnailCacheTime} < ${now}`)
+           }
        } else {
-           this.console.log(`Not refreshing camera picture: ${this.lastImageRefresh} + cache time: ${thumbnailCacheTime} < ${now}`)
+           throw new Error("To enable snapshots, enable rebroadcast plugin or set a Snapshot URL in the Snapshot plugin to an external image.");
        }
        return mediaManager.createMediaObject(this.cachedImage, 'image/jpeg')
    }

    async getPictureOptions(): Promise<PictureOptions[]> {
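The takePicture() rewrite above gates snapshot capture on the rebroadcast (prebuffer) mixin and keeps the last JPEG in a time-boxed cache. A minimal sketch of the same TTL-cache idea in Python, assuming only a callable that returns JPEG bytes (fetch_snapshot is illustrative, not part of the plugin):

    import time

    class SnapshotCache:
        def __init__(self, fetch_snapshot, ttl_seconds=300):
            # fetch_snapshot: any callable returning JPEG bytes (hypothetical)
            self.fetch_snapshot = fetch_snapshot
            self.ttl = ttl_seconds
            self.cached = None
            self.last_refresh = 0.0

        def take_picture(self):
            now = time.monotonic()
            if self.cached is None or self.last_refresh + self.ttl < now:
                # refresh from the live stream, then cache for ttl seconds
                self.cached = self.fetch_snapshot()
                self.last_refresh = now
            return self.cached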
@@ -317,52 +322,31 @@ export class BticinoSipCamera extends ScryptedDeviceBase implements MotionSensor
            this.session = await this.callIntercom( cleanup )
        }

        this.stopIntercom();

-       const ffmpegInput: FFmpegInput = JSON.parse((await mediaManager.convertMediaObjectToBuffer(media, ScryptedMimeTypes.FFmpegInput)).toString());
-
-       const audioOutForwarder = await createBindZero()
-       this.audioOutForwarder = audioOutForwarder.server
+       const ffmpegInput = await sdk.mediaManager.convertMediaObjectToJSON<FFmpegInput>(media, ScryptedMimeTypes.FFmpegInput);
        let address = (await this.remoteRtpDescription).address
-       audioOutForwarder.server.on('message', message => {
-           if( this.session )
-               this.session.audioSplitter.send(message, 40004, address)
-           return null
-       });
-
-       const args = ffmpegInput.inputArguments.slice();
-       args.push(
-           '-vn', '-dn', '-sn',
-           '-acodec', 'speex',
-           '-flags', '+global_header',
-           '-ac', '1',
-           '-ar', '8k',
-           '-f', 'rtp',
-           //'-srtp_out_suite', 'AES_CM_128_HMAC_SHA1_80',
-           //'-srtp_out_params', encodeSrtpOptions(this.decodedSrtpOptions),
-           `rtp://127.0.0.1:${audioOutForwarder.port}?pkt_size=188`,
-       );
-
-       this.console.log("===========================================")
-       safePrintFFmpegArguments( this.console, args )
-       this.console.log("===========================================")
-
-       const cp = child_process.spawn(await mediaManager.getFFmpegPath(), args);
-       ffmpegLogInitialOutput(this.console, cp)
-       this.audioOutProcess = cp;
-       cp.on('exit', () => this.console.log('two way audio ended'));
-       this.session.onCallEnded.subscribe(() => {
-           closeQuiet(audioOutForwarder.server);
-           safeKillFFmpeg(cp)
+       this.forwarder = await startRtpForwarderProcess(this.console, ffmpegInput, {
+           audio: {
+               codecCopy: 'speex',
+               encoderArguments: [
+                   '-vn', '-sn', '-dn',
+                   '-acodec', 'speex',
+                   '-flags', '+global_header',
+                   '-ac', '1',
+                   '-ar', '8k',
+                   '-f', 'rtp',
+               ],
+               onRtp: rtp => {
+                   this.session?.audioSplitter?.send(rtp, 40004, address)
+               }
+           }
        });
    }

    async stopIntercom(): Promise<void> {
-       closeQuiet(this.audioOutForwarder)
-       this.audioOutProcess?.kill('SIGKILL')
-       this.audioOutProcess = undefined
-       this.audioOutForwarder = undefined
+       this.forwarder?.kill()
+       this.forwarder = undefined
    }

    resetStreamTimeout() {
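The startIntercom() change replaces a hand-managed ffmpeg child process plus a UDP relay socket with startRtpForwarderProcess and an onRtp callback that pushes each packet to the intercom's audio port (40004 in the diff). A rough Python sketch of the underlying relay loop; everything except the destination port is illustrative:

    import socket

    def relay_rtp(listen_port: int, dest_addr: str, dest_port: int = 40004):
        # Receive RTP datagrams (e.g. from an ffmpeg '-f rtp' output) and
        # forward each packet unchanged to the intercom's audio port.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind(("127.0.0.1", listen_port))
        while True:
            packet, _ = sock.recvfrom(2048)
            sock.sendto(packet, (dest_addr, dest_port))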
@@ -572,12 +556,24 @@ export class BticinoSipCamera extends ScryptedDeviceBase implements MotionSensor
        // Call the C300X
        this.remoteRtpDescription = sip.callOrAcceptInvite(
            ( audio ) => {
-               return [
-                   // this SDP is used by the intercom and will send the encrypted packets which we don't care about to the loopback on port 65000 of the intercom
-                   `m=audio 65000 RTP/SAVP 110`,
-                   `a=rtpmap:110 speex/8000`,
-                   `a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:${this.keyAndSalt}`,
-               ]
+               let audioSection = [
+                   // this SDP is used by the intercom and will send the encrypted packets which we don't care about to the loopback on port 65000 of the intercom
+                   `m=audio 65000 RTP/SAVP 110`,
+                   `a=rtpmap:110 speex/8000`,
+                   `a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:${this.keyAndSalt}`,
+               ]
+               if( !this.incomingCallRequest ) {
+                   let DEVADDR = this.storage.getItem('DEVADDR');
+                   if( DEVADDR ) {
+                       audioSection.unshift('a=DEVADDR:' + DEVADDR)
+                   } else {
+                       if( sipOptions.to.toLocaleLowerCase().indexOf('c300x') >= 0 || sipOptions.to.toLocaleLowerCase().indexOf('c100x') >= 0 ) {
+                           // Needed for bt_answering_machine (bticino specific), to check for c100X
+                           audioSection.unshift('a=DEVADDR:20')
+                       }
+                   }
+               }
+               return audioSection
            }, ( video ) => {
                return [
                    // this SDP is used by the intercom and will send the encrypted packets which we don't care about to the loopback on port 65000 of the intercom
4 plugins/core/package-lock.json (generated)
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/core",
- "version": "0.3.24",
+ "version": "0.3.25",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/core",
-     "version": "0.3.24",
+     "version": "0.3.25",
      "license": "Apache-2.0",
      "dependencies": {
        "@scrypted/common": "file:../../common",

@@ -1,6 +1,6 @@
{
  "name": "@scrypted/core",
- "version": "0.3.24",
+ "version": "0.3.25",
  "description": "Scrypted Core plugin. Provides the UI, websocket, and engine.io APIs.",
  "author": "Scrypted",
  "license": "Apache-2.0",
@@ -2,6 +2,7 @@ import fs from 'fs';
import child_process from 'child_process';
import { once } from 'events';
import sdk from '@scrypted/sdk';
+import { stdout } from 'process';

export const SCRYPTED_INSTALL_ENVIRONMENT_LXC = 'lxc';

@@ -41,6 +42,31 @@ export async function checkLxcDependencies() {
        sdk.log.a('Failed to daemon-reload systemd.');
    }

+   try {
+       // intel opencl icd is broken from their official apt repos on kernel versions 6.8, which ships with ubuntu 24.04 and proxmox 8.2.
+       // the intel apt repo has not been updated yet.
+       // the current workaround is to install the release manually.
+       // https://github.com/intel/compute-runtime/releases/tag/24.13.29138.7
+       const output = await new Promise<string>((r, f) => child_process.exec("sh -c 'apt show versions intel-opencl-icd'", (err, stdout, stderr) => {
+           if (err)
+               f(err);
+           else
+               r(stdout + '\n' + stderr);
+       }));
+
+       if (output.includes('Version: 23')) {
+           const cp = child_process.spawn('sh', ['-c', 'curl https://raw.githubusercontent.com/koush/scrypted/main/install/docker/install-intel-graphics.sh | bash']);
+           const [exitCode] = await once(cp, 'exit');
+           if (exitCode !== 0)
+               sdk.log.a('Failed to install intel-opencl-icd.');
+           else
+               needRestart = true;
+       }
+   }
+   catch (e) {
+       sdk.log.a('Failed to verify/install intel-opencl-icd version.');
+   }
+
    if (needRestart)
        sdk.log.a('A system update is pending. Please restart Scrypted to apply changes.');
}
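The new LXC check shells out to apt to detect the broken 23.x intel-opencl-icd package and, if found, runs the repo's install script for a known-good release. The same check could be sketched in Python as follows, reusing the exact commands from the diff:

    import subprocess

    def ensure_intel_opencl_icd() -> bool:
        """Return True if a newer intel-opencl-icd was installed (restart needed)."""
        out = subprocess.run(
            ["sh", "-c", "apt show versions intel-opencl-icd"],
            capture_output=True, text=True,
        )
        combined = out.stdout + "\n" + out.stderr
        if "Version: 23" not in combined:
            return False
        # the broken 23.x package is present; install the pinned release manually
        script = "curl https://raw.githubusercontent.com/koush/scrypted/main/install/docker/install-intel-graphics.sh | bash"
        return subprocess.run(["sh", "-c", script]).returncode == 0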
4 plugins/coreml/package-lock.json (generated)
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/coreml",
- "version": "0.1.49",
+ "version": "0.1.54",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/coreml",
-     "version": "0.1.49",
+     "version": "0.1.54",
      "devDependencies": {
        "@scrypted/sdk": "file:../../sdk"
      }

@@ -42,5 +42,5 @@
  "devDependencies": {
    "@scrypted/sdk": "file:../../sdk"
  },
- "version": "0.1.49"
+ "version": "0.1.54"
}
@@ -26,14 +26,13 @@ predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "CoreML-Predict")

availableModels = [
    "Default",
    "scrypted_yolov10m_320",
    "scrypted_yolov10n_320",
    "scrypted_yolo_nas_s_320",
    "scrypted_yolov9c_320",
    "scrypted_yolov9c",
    "scrypted_yolov6n_320",
    "scrypted_yolov6n",
    "scrypted_yolov6s_320",
    "scrypted_yolov6s",
    "scrypted_yolov8n_320",
    "scrypted_yolov8n",
    "ssdlite_mobilenet_v2",
    "yolov4-tiny",
]
@@ -77,6 +76,8 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProv
            self.storage.setItem("model", "Default")
            model = "scrypted_yolov9c_320"
        self.yolo = "yolo" in model
+       self.scrypted_yolov10n = "scrypted_yolov10" in model
        self.scrypted_yolo_nas = "scrypted_yolo_nas" in model
        self.scrypted_yolo = "scrypted_yolo" in model
        self.scrypted_model = "scrypted" in model
        model_version = "v7"
@@ -132,6 +133,8 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProv
        self.loop = asyncio.get_event_loop()
        self.minThreshold = 0.2

+       self.faceDevice = None
+       self.textDevice = None
        asyncio.ensure_future(self.prepareRecognitionModels(), loop=self.loop)

    async def prepareRecognitionModels(self):
@@ -169,9 +172,11 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProv

    async def getDevice(self, nativeId: str) -> Any:
        if nativeId == "facerecognition":
-           return CoreMLFaceRecognition(nativeId)
+           self.faceDevice = self.faceDevice or CoreMLFaceRecognition(nativeId)
+           return self.faceDevice
        if nativeId == "textrecognition":
-           return CoreMLTextRecognition(nativeId)
+           self.textDevice = self.textDevice or CoreMLTextRecognition(nativeId)
+           return self.textDevice
        raise Exception("unknown device")

    async def getSettings(self) -> list[Setting]:
@@ -211,6 +216,18 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProv
        if self.yolo:
            out_dict = await self.queue_batch({self.input_name: input})

+           if self.scrypted_yolov10n:
+               results = list(out_dict.values())[0][0]
+               objs = yolo.parse_yolov10(results)
+               ret = self.create_detection_result(objs, src_size, cvss)
+               return ret
+
+           if self.scrypted_yolo_nas:
+               predictions = list(out_dict.values())
+               objs = yolo.parse_yolo_nas(predictions)
+               ret = self.create_detection_result(objs, src_size, cvss)
+               return ret
+
            if self.scrypted_yolo:
                results = list(out_dict.values())[0][0]
                objs = yolo.parse_yolov9(results)
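The getDevice() change caches one recognition device per nativeId instead of constructing a new instance on every call. The memoization pattern in isolation (the factory names here are illustrative, not the plugin's API):

    class DeviceCache:
        def __init__(self, factories):
            # factories: nativeId -> zero-arg constructor (e.g. a recognition class)
            self.factories = factories
            self.instances = {}

        def get_device(self, native_id: str):
            if native_id not in self.factories:
                raise Exception("unknown device")
            # construct lazily on first request, then reuse the singleton
            if native_id not in self.instances:
                self.instances[native_id] = self.factories[native_id]()
            return self.instances[native_id]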
@@ -3,6 +3,7 @@ from __future__ import annotations
import concurrent.futures
import os

+import asyncio
import coremltools as ct
import numpy as np
# import Quartz
@@ -10,6 +11,7 @@ import numpy as np

# import Vision
from predict.face_recognize import FaceRecognizeDetection
+from PIL import Image


def euclidean_distance(arr1, arr2):
@@ -29,6 +31,8 @@ predictExecutor = concurrent.futures.ThreadPoolExecutor(8, "Vision-Predict")
class CoreMLFaceRecognition(FaceRecognizeDetection):
    def __init__(self, nativeId: str | None = None):
        super().__init__(nativeId=nativeId)
+       self.detectExecutor = concurrent.futures.ThreadPoolExecutor(1, "detect-face")
+       self.recogExecutor = concurrent.futures.ThreadPoolExecutor(1, "recog-face")

    def downloadModel(self, model: str):
        model_version = "v7"
@@ -51,23 +55,29 @@ class CoreMLFaceRecognition(FaceRecognizeDetection):
        inputName = model.get_spec().description.input[0].name
        return model, inputName

-   def predictDetectModel(self, input):
-       model, inputName = self.detectModel
-       out_dict = model.predict({inputName: input})
-       results = list(out_dict.values())[0][0]
+   async def predictDetectModel(self, input: Image.Image):
+       def predict():
+           model, inputName = self.detectModel
+           out_dict = model.predict({inputName: input})
+           results = list(out_dict.values())[0][0]
+           return results
+
+       results = await asyncio.get_event_loop().run_in_executor(
+           self.detectExecutor, lambda: predict()
+       )
        return results

-   def predictFaceModel(self, input):
-       model, inputName = self.faceModel
-       out_dict = model.predict({inputName: input})
-       return out_dict["var_2167"][0]
+   async def predictFaceModel(self, input: np.ndarray):
+       def predict():
+           model, inputName = self.faceModel
+           out_dict = model.predict({inputName: input})
+           results = out_dict["var_2167"][0]
+           return results
+       results = await asyncio.get_event_loop().run_in_executor(
+           self.recogExecutor, lambda: predict()
+       )
+       return results

    def predictTextModel(self, input):
        model, inputName = self.textModel
        out_dict = model.predict({inputName: input})
        preds = out_dict["linear_2"]
        return preds

    # def predictVision(self, input: Image.Image) -> asyncio.Future[list[Prediction]]:
    #     buffer = input.tobytes()
    #     myData = NSData.alloc().initWithBytes_length_(buffer, len(buffer))
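predictDetectModel and predictFaceModel now wrap the synchronous CoreML model.predict call in a dedicated single-thread executor, so inference never blocks the asyncio event loop and stays serialized per model. The general pattern, as a standalone sketch with a stubbed-in model:

    import asyncio
    import concurrent.futures

    # one worker thread per model keeps predictions serialized and off the event loop
    executor = concurrent.futures.ThreadPoolExecutor(1, "predict")

    def blocking_predict(x):
        # stand-in for a synchronous model.predict(...) call
        return x * 2

    async def predict_async(x):
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(executor, lambda: blocking_predict(x))

    print(asyncio.run(predict_async(21)))  # 42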
@@ -1,8 +1,13 @@
from __future__ import annotations

import concurrent.futures
import os

+import asyncio
+
import coremltools as ct
import numpy as np
+from PIL import Image

from predict.text_recognize import TextRecognition

@@ -11,6 +16,9 @@ class CoreMLTextRecognition(TextRecognition):
    def __init__(self, nativeId: str | None = None):
        super().__init__(nativeId=nativeId)

+       self.detectExecutor = concurrent.futures.ThreadPoolExecutor(1, "detect-text")
+       self.recogExecutor = concurrent.futures.ThreadPoolExecutor(1, "recog-text")
+
    def downloadModel(self, model: str):
        model_version = "v7"
        mlmodel = "model"
@@ -32,14 +40,24 @@ class CoreMLTextRecognition(TextRecognition):
        inputName = model.get_spec().description.input[0].name
        return model, inputName

-   def predictDetectModel(self, input):
-       model, inputName = self.detectModel
-       out_dict = model.predict({inputName: input})
-       results = list(out_dict.values())[0]
+   async def predictDetectModel(self, input: Image.Image):
+       def predict():
+           model, inputName = self.detectModel
+           out_dict = model.predict({inputName: input})
+           results = list(out_dict.values())[0]
+           return results
+       results = await asyncio.get_event_loop().run_in_executor(
+           self.detectExecutor, lambda: predict()
+       )
        return results

-   def predictTextModel(self, input):
-       model, inputName = self.textModel
-       out_dict = model.predict({inputName: input})
-       preds = out_dict["linear_2"]
+   async def predictTextModel(self, input: np.ndarray):
+       def predict():
+           model, inputName = self.textModel
+           out_dict = model.predict({inputName: input})
+           preds = out_dict["linear_2"]
+           return preds
+       preds = await asyncio.get_event_loop().run_in_executor(
+           self.recogExecutor, lambda: predict()
+       )
        return preds
38 plugins/homekit/package-lock.json (generated)
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/homekit",
- "version": "1.2.54",
+ "version": "1.2.57",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/homekit",
-     "version": "1.2.54",
+     "version": "1.2.57",
      "dependencies": {
        "@koush/werift-src": "file:../../external/werift",
        "check-disk-space": "^3.4.0",
@@ -47,26 +47,20 @@
      "examples/*"
    ],
    "devDependencies": {
-     "@biomejs/biome": "^1.4.1",
      "@types/jest": "^29.5.11",
-     "@types/node": "^20.10.4",
-     "@typescript-eslint/eslint-plugin": "^6.14.0",
-     "@typescript-eslint/parser": "^6.14.0",
-     "eslint": "^8.55.0",
-     "eslint-config-prettier": "^9.1.0",
-     "eslint-plugin-prettier": "^5.0.1",
-     "eslint-plugin-simple-import-sort": "^10.0.0",
+     "@types/node": "^20.10.6",
      "jest": "^29.7.0",
-     "knip": "^3.7.0",
+     "knip": "^3.9.0",
      "node-actionlint": "^1.2.2",
      "organize-imports-cli": "^0.10.0",
      "prettier": "^3.1.1",
      "process": "^0.11.10",
      "ts-jest": "^29.1.1",
      "ts-node": "^10.9.2",
      "ts-node-dev": "^2.0.0",
-     "typedoc": "0.25.4",
+     "typedoc": "0.25.5",
      "typedoc-plugin-markdown": "3.17.1",
-     "typescript": "5.0.4"
+     "typescript": "5.3.3"
    },
    "engines": {
      "node": ">=16"
@@ -127,7 +121,7 @@
    },
    "../../sdk": {
      "name": "@scrypted/sdk",
-     "version": "0.3.18",
+     "version": "0.3.29",
      "dev": true,
      "license": "ISC",
      "dependencies": {
@@ -1306,26 +1300,20 @@
    "@koush/werift-src": {
      "version": "file:../../external/werift",
      "requires": {
-       "@biomejs/biome": "^1.4.1",
        "@types/jest": "^29.5.11",
-       "@types/node": "^20.10.4",
-       "@typescript-eslint/eslint-plugin": "^6.14.0",
-       "@typescript-eslint/parser": "^6.14.0",
-       "eslint": "^8.55.0",
-       "eslint-config-prettier": "^9.1.0",
-       "eslint-plugin-prettier": "^5.0.1",
-       "eslint-plugin-simple-import-sort": "^10.0.0",
+       "@types/node": "^20.10.6",
        "jest": "^29.7.0",
-       "knip": "^3.7.0",
+       "knip": "^3.9.0",
        "node-actionlint": "^1.2.2",
        "organize-imports-cli": "^0.10.0",
        "prettier": "^3.1.1",
        "process": "^0.11.10",
        "ts-jest": "^29.1.1",
        "ts-node": "^10.9.2",
        "ts-node-dev": "^2.0.0",
-       "typedoc": "0.25.4",
+       "typedoc": "0.25.5",
        "typedoc-plugin-markdown": "3.17.1",
-       "typescript": "5.0.4"
+       "typescript": "5.3.3"
      }
    },
    "@leichtgewicht/ip-codec": {
@@ -1,6 +1,6 @@
{
  "name": "@scrypted/homekit",
- "version": "1.2.54",
+ "version": "1.2.57",
  "description": "HomeKit Plugin for Scrypted",
  "scripts": {
    "scrypted-setup-project": "scrypted-setup-project",
@@ -166,10 +166,12 @@ export class HomeKitPlugin extends ScryptedDeviceBase implements MixinProvider,
            case MDNSAdvertiser.CIAO:
                break;
            default:
-               if (fs.existsSync('/var/run/avahi-daemon/'))
-                   advertiser = MDNSAdvertiser.AVAHI;
-               else
-                   advertiser = MDNSAdvertiser.CIAO;
+               advertiser = MDNSAdvertiser.CIAO;
+               // this avahi detection doesn't work sometimes? fails silently.
+               // if (fs.existsSync('/var/run/avahi-daemon/'))
+               //     advertiser = MDNSAdvertiser.AVAHI;
+               // else
+               //     advertiser = MDNSAdvertiser.CIAO;
                break;
        }
        return advertiser;
@@ -267,8 +269,6 @@ export class HomeKitPlugin extends ScryptedDeviceBase implements MixinProvider,
            },
            undefined, 'Pairing'));
        storageSettings.settings.pincode.persistedDefaultValue = randomPinCode();
-       // TODO: change this value after this current default has been persisted to existing clients.
-       // changing it now will cause existing accessories be renamed.
        storageSettings.settings.addIdentifyingMaterial.persistedDefaultValue = false;

        const mixinConsole = deviceManager.getMixinConsole(device.id, this.nativeId);
@@ -117,7 +117,7 @@ addSupportedType({
    },
    closeRecordingStream(streamId, reason) {
        const r = openRecordingStreams.get(streamId);
        r?.throw(new Error(reason?.toString()));
        console.log(`motion recording closed ${reason > 0 ? `(error code: ${reason})` : ''}`);
        openRecordingStreams.delete(streamId);
    },
    updateRecordingActive(active) {

@@ -321,6 +321,9 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
    let moov: Buffer[];

    for await (const box of generator) {
+       if (!isOpen())
+           return;
+
        const { header, type, data } = box;
        // console.log('motion fragment box', type);

@@ -352,6 +355,8 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
            needSkip = false;
            continue;
        }
+       if (!isOpen())
+           return;
        const fragment = Buffer.concat(pending);
        saveFragment(i, fragment);
        pending = [];
@@ -361,8 +366,6 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
            data: fragment,
            isLast,
        }
-       if (!isOpen())
-           return;
        yield recordingPacket;
        if (wasLast)
            break;
@@ -370,7 +373,7 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
        }
    }
    catch (e) {
-       console.log(`motion recording completed ${e}`);
+       console.log(`motion recording error ${e}`);
    }
    finally {
        console.log(`motion recording finished`);
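The recording generator now checks isOpen() before assembling each fragment rather than after, so a closed HomeKit stream stops the loop before any buffer concatenation happens. The guard pattern in miniature (names are illustrative):

    async def fragments(source, is_open, assemble):
        # stop producing as soon as the consumer goes away;
        # check before any expensive per-fragment work
        async for box in source:
            if not is_open():
                return
            yield assemble(box)  # assemble() stands in for the fragment work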
@@ -24,8 +24,6 @@ export function createSnapshotHandler(device: ScryptedDevice & VideoCamera & Cam
            width: request.width,
            height: request.height,
        },
-       // wait up to 2 seconds for the snapshot image, fallback to cached image
-       timeout: 2000,
    })
    return await mediaManager.convertMediaObjectToBuffer(media, 'image/jpeg');
}
@@ -354,15 +354,11 @@ export function createCameraStreamingDelegate(device: ScryptedDevice & VideoCame
    if (twoWayAudio) {
        let rtspServer: RtspServer;
        let track: string;
-       let playing = false;
-       session.audioReturn.once('message', async buffer => {
+       let twoWayAudioState: 'stopped' | 'starting' | 'started' = 'stopped';
+
+       const start = async () => {
            try {
-               const decrypted = srtpSession.decrypt(buffer);
-               const rtp = RtpPacket.deSerialize(decrypted);
-
-               if (rtp.header.payloadType !== session.startRequest.audio.pt)
-                   return;
-
+               twoWayAudioState = 'starting';
                const { clientPromise, url } = await listenZeroSingleClient();
                const rtspUrl = url.replace('tcp', 'rtsp');
                let sdp = createReturnAudioSdp(session.startRequest.audio);
@@ -393,7 +389,7 @@ export function createCameraStreamingDelegate(device: ScryptedDevice & VideoCame
                    device.stopIntercom();
                    client.destroy();
                    rtspServer = undefined;
-                   playing = false;
+                   twoWayAudioState = 'stopped';
                }
                // stop the intercom if the client dies for any reason.
                // allow the streaming session to continue however.
@@ -402,16 +398,17 @@ export function createCameraStreamingDelegate(device: ScryptedDevice & VideoCame

                rtspServer = new RtspServer(client, sdp);
                await rtspServer.handlePlayback();
-               playing = true;
+               twoWayAudioState = 'started';
            }
            catch (e) {
                console.error('two way audio failed', e);
+               twoWayAudioState = 'stopped';
            }
-       });
+       };

        const srtpSession = new SrtpSession(session.aconfig);
        session.audioReturn.on('message', buffer => {
-           if (!playing)
+           if (twoWayAudioState === 'starting')
                return;

            const decrypted = srtpSession.decrypt(buffer);
@@ -420,6 +417,9 @@ export function createCameraStreamingDelegate(device: ScryptedDevice & VideoCame
            if (rtp.header.payloadType !== session.startRequest.audio.pt)
                return;

+           if (twoWayAudioState !== 'started')
+               return start();
+
            rtspServer.sendTrack(track, decrypted, false);
        });
    }
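This refactor replaces the boolean playing flag with a three-valued state so that packets arriving while the RTSP return channel is still being set up are dropped instead of spawning a second setup. A compact Python sketch of that guard logic (forward() and the setup body are stand-ins):

    state = "stopped"  # 'stopped' | 'starting' | 'started'

    def forward(packet):
        print("forwarding", len(packet), "bytes")  # stand-in for rtspServer.sendTrack

    def start_return_audio():
        global state
        state = "starting"
        # ... the real code awaits RTSP client setup here ...
        state = "started"

    def on_rtp_packet(packet):
        if state == "starting":
            return                       # setup in flight: drop the packet
        if state != "started":
            return start_return_audio()  # first packet after stop kicks off setup
        forward(packet)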
4 plugins/objectdetector/package-lock.json (generated)
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/objectdetector",
- "version": "0.1.39",
+ "version": "0.1.42",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/objectdetector",
-     "version": "0.1.39",
+     "version": "0.1.42",
      "license": "Apache-2.0",
      "dependencies": {
        "@scrypted/common": "file:../../common",

@@ -1,6 +1,6 @@
{
  "name": "@scrypted/objectdetector",
- "version": "0.1.39",
+ "version": "0.1.42",
  "description": "Scrypted Video Analysis Plugin. Installed alongside a detection service like OpenCV or TensorFlow.",
  "author": "Scrypted",
  "license": "Apache-2.0",
@@ -1159,7 +1159,7 @@ export class ObjectDetectionPlugin extends AutoenableMixinProvider implements Se
    async releaseDevice(id: string, nativeId: string): Promise<void> {
        if (nativeId?.startsWith(SMART_MOTIONSENSOR_PREFIX)) {
            const smart = this.devices.get(nativeId) as SmartMotionSensor;
-           smart?.listener?.removeListener();
+           smart?.detectionListener?.removeListener();
        }
    }
@@ -1,9 +1,10 @@
import sdk, { Camera, EventListenerRegister, MediaObject, MotionSensor, ObjectDetector, ObjectsDetected, Readme, RequestPictureOptions, ResponsePictureOptions, ScryptedDevice, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, ScryptedNativeId, Setting, SettingValue, Settings } from "@scrypted/sdk";
import { StorageSetting, StorageSettings } from "@scrypted/sdk/storage-settings";
-import type { ObjectDetectionPlugin } from "./main";
import { levenshteinDistance } from "./edit-distance";
+import type { ObjectDetectionPlugin } from "./main";

export const SMART_MOTIONSENSOR_PREFIX = 'smart-motionsensor-';
+export const SMART_OCCUPANCYSENSOR_PREFIX = 'smart-occupancysensor-';

export function createObjectDetectorStorageSetting(): StorageSetting {
    return {
@@ -26,7 +27,7 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
        },
        detectionTimeout: {
            title: 'Object Detection Timeout',
-           description: 'Duration in seconds the sensor will report motion, before resetting.',
+           description: 'Duration in seconds the sensor will report motion, before resetting. Setting this to 0 will reset the sensor when motion stops.',
            type: 'number',
            defaultValue: 60,
        },
@@ -71,9 +72,17 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
            type: 'number',
            defaultValue: 2,
        },
+       labelScore: {
+           group: 'Recognition',
+           title: 'Label Score',
+           description: 'The minimum score required for a label to trigger the motion sensor.',
+           type: 'number',
+           defaultValue: 0,
+       }
    });

    listener: EventListenerRegister;
    detectionListener: EventListenerRegister;
    motionListener: EventListenerRegister;
    timeout: NodeJS.Timeout;
    lastPicture: Promise<MediaObject>;

@@ -143,8 +152,10 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R

    trigger() {
        this.resetTrigger();
-       const duration: number = this.storageSettings.values.detectionTimeout;
        this.motionDetected = true;
+       const duration: number = this.storageSettings.values.detectionTimeout;
+       if (!duration)
+           return;
        this.timeout = setTimeout(() => {
            this.motionDetected = false;
        }, duration * 1000);
@@ -152,12 +163,14 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R

    rebind() {
        this.motionDetected = false;
-       this.listener?.removeListener();
-       this.listener = undefined;
+       this.detectionListener?.removeListener();
+       this.detectionListener = undefined;
+       this.motionListener?.removeListener();
+       this.motionListener = undefined;
        this.resetTrigger();

-       const objectDetector: ObjectDetector & ScryptedDevice = this.storageSettings.values.objectDetector;
+       const objectDetector: ObjectDetector & MotionSensor & ScryptedDevice = this.storageSettings.values.objectDetector;
        if (!objectDetector)
            return;

@@ -167,13 +180,25 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R

        const console = sdk.deviceManager.getMixinConsole(objectDetector.id, this.nativeId);

-       this.listener = objectDetector.listen(ScryptedInterface.ObjectDetector, (source, details, data) => {
+       this.motionListener = objectDetector.listen({
+           event: ScryptedInterface.MotionSensor,
+           watch: true,
+       }, (source, details, data) => {
+           const duration: number = this.storageSettings.values.detectionTimeout;
+           if (duration)
+               return;
+
+           if (!objectDetector.motionDetected)
+               this.motionDetected = false;
+       });
+
+       this.detectionListener = objectDetector.listen(ScryptedInterface.ObjectDetector, (source, details, data) => {
            const detected: ObjectsDetected = data;

            if (this.storageSettings.values.requireDetectionThumbnail && !detected.detectionId)
                return false;

-           const { labels, labelDistance } = this.storageSettings.values;
+           const { labels, labelDistance, labelScore } = this.storageSettings.values;

            const match = detected.detections?.find(d => {
                if (this.storageSettings.values.requireScryptedNvrDetections && !d.boundingBox)
@@ -208,13 +233,24 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
                    return false;

                for (const label of labels) {
-                   if (label === d.label)
-                       return true;
+                   if (label === d.label) {
+                       if (!labelScore || d.labelScore >= labelScore)
+                           return true;
+                       this.console.log('Label score too low.', d.labelScore);
+                       continue;
+                   }

                    if (!labelDistance)
                        continue;
-                   if (levenshteinDistance(label, d.label) <= labelDistance)
-                       return true;
-                   this.console.log('Label does not match.', label, d.label);
+
+                   if (levenshteinDistance(label, d.label) > labelDistance) {
+                       this.console.log('Label does not match.', label, d.label, d.labelScore);
+                       continue;
+                   }
+
+                   if (!labelScore || d.labelScore >= labelScore)
+                       return true;
+                   this.console.log('Label score too low.', d.labelScore);
                }

                return false;
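The matcher now accepts a detection label either on an exact match or within a configured Levenshtein edit distance, and in both cases additionally requires a minimum label score. The same decision logic as a standalone Python sketch (the levenshtein helper here is a stand-in for the plugin's edit-distance module):

    def levenshtein(a: str, b: str) -> int:
        # classic dynamic-programming edit distance
        prev = list(range(len(b) + 1))
        for i, ca in enumerate(a, 1):
            cur = [i]
            for j, cb in enumerate(b, 1):
                cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
            prev = cur
        return prev[-1]

    def label_matches(detection_label, detection_score, wanted_labels,
                      max_distance=0, min_score=0.0):
        for label in wanted_labels:
            exact = label == detection_label
            fuzzy = max_distance and levenshtein(label, detection_label) <= max_distance
            if (exact or fuzzy) and (not min_score or detection_score >= min_score):
                return True
        return False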
8 plugins/onnx/.vscode/settings.json (vendored)
@@ -3,16 +3,16 @@
    // docker installation
    // "scrypted.debugHost": "koushik-ubuntuvm",
    // "scrypted.serverRoot": "/server",
-   "scrypted.debugHost": "koushik-ubuntuvm",
-   "scrypted.serverRoot": "/home/koush/.scrypted",
+   // "scrypted.debugHost": "koushik-ubuntuvm",
+   // "scrypted.serverRoot": "/home/koush/.scrypted",

    // pi local installation
    // "scrypted.debugHost": "192.168.2.119",
    // "scrypted.serverRoot": "/home/pi/.scrypted",

    // local checkout
-   // "scrypted.debugHost": "127.0.0.1",
-   // "scrypted.serverRoot": "/Users/koush/.scrypted",
+   "scrypted.debugHost": "127.0.0.1",
+   "scrypted.serverRoot": "/Users/koush/.scrypted",
    // "scrypted.debugHost": "koushik-winvm",
    // "scrypted.serverRoot": "C:\\Users\\koush\\.scrypted",
4 plugins/onnx/package-lock.json (generated)
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/openvino",
- "version": "0.1.81",
+ "version": "0.1.93",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/openvino",
-     "version": "0.1.81",
+     "version": "0.1.93",
      "devDependencies": {
        "@scrypted/sdk": "file:../../sdk"
      }

@@ -33,6 +33,7 @@
  "runtime": "python",
  "type": "API",
  "interfaces": [
+   "DeviceProvider",
    "Settings",
    "ObjectDetection",
    "ObjectDetectionPreview"
@@ -41,5 +42,5 @@
  "devDependencies": {
    "@scrypted/sdk": "file:../../sdk"
  },
- "version": "0.1.81"
+ "version": "0.1.93"
}
@@ -1,37 +1,43 @@
from __future__ import annotations

+import ast
import asyncio
+import concurrent.futures
+import json
+import platform
+import sys
+import threading
import traceback
from typing import Any, Tuple

-import sys
-import platform
import numpy as np
import onnxruntime
import scrypted_sdk
from PIL import Image
-import ast
from scrypted_sdk.other import SettingValue
from scrypted_sdk.types import Setting
-import concurrent.futures

import common.yolo as yolo
from predict import PredictPlugin

-predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "ONNX-Predict")
+from .face_recognition import ONNXFaceRecognition
+
+try:
+    from .text_recognition import ONNXTextRecognition
+except:
+    ONNXTextRecognition = None

availableModels = [
    "Default",
    "scrypted_yolov10m_320",
    "scrypted_yolov10n_320",
    "scrypted_yolo_nas_s_320",
    "scrypted_yolov6n_320",
    "scrypted_yolov6n",
    "scrypted_yolov6s_320",
    "scrypted_yolov6s",
    "scrypted_yolov9c_320",
    "scrypted_yolov9c",
    "scrypted_yolov8n_320",
    "scrypted_yolov8n",
]


def parse_labels(names):
    j = ast.literal_eval(names)
    ret = {}
@@ -51,12 +57,14 @@ class ONNXPlugin(
            self.storage.setItem("model", "Default")
            model = "scrypted_yolov8n_320"
        self.yolo = "yolo" in model
+       self.scrypted_yolov10 = "scrypted_yolov10" in model
        self.scrypted_yolo_nas = "scrypted_yolo_nas" in model
        self.scrypted_yolo = "scrypted_yolo" in model
        self.scrypted_model = "scrypted" in model

        print(f"model {model}")

-       onnxmodel = "best" if self.scrypted_model else model
+       onnxmodel = model if self.scrypted_yolo_nas else "best" if self.scrypted_model else model

        model_version = "v2"
        onnxfile = self.downloadFile(
@@ -66,34 +74,114 @@ class ONNXPlugin(

        print(onnxfile)

+       deviceIds = self.storage.getItem("deviceIds") or '["0"]'
+       deviceIds = json.loads(deviceIds)
+       if not len(deviceIds):
+           deviceIds = ["0"]
+       self.deviceIds = deviceIds
+
+       compiled_models = []
+       self.compiled_models = {}
+
        try:
-           sess_options = onnxruntime.SessionOptions()
+           for deviceId in deviceIds:
+               sess_options = onnxruntime.SessionOptions()

-           providers: list[str] = []
-           if sys.platform == 'darwin':
-               providers.append("CoreMLExecutionProvider")
-
-           if 'linux' in sys.platform and platform.machine() == 'x86_64':
-               providers.append("CUDAExecutionProvider")
-
-           providers.append('CPUExecutionProvider')
+               providers: list[str] = []
+               if sys.platform == 'darwin':
+                   providers.append("CoreMLExecutionProvider")

-           self.compiled_model = onnxruntime.InferenceSession(onnxfile, sess_options=sess_options, providers=providers)
+               if ('linux' in sys.platform or 'win' in sys.platform) and platform.machine() == 'x86_64':
+                   deviceId = int(deviceId)
+                   providers.append(("CUDAExecutionProvider", { "device_id": deviceId }))
+
+               providers.append('CPUExecutionProvider')
+
+               compiled_model = onnxruntime.InferenceSession(onnxfile, sess_options=sess_options, providers=providers)
+               compiled_models.append(compiled_model)
+
+               input = compiled_model.get_inputs()[0]
+               self.model_dim = input.shape[2]
+               self.input_name = input.name
+               self.labels = parse_labels(compiled_model.get_modelmeta().custom_metadata_map['names'])
        except:
            import traceback

            traceback.print_exc()
            print("Reverting all settings.")
            self.storage.removeItem("model")
+           self.storage.removeItem("deviceIds")
            self.requestRestart()

-       input = self.compiled_model.get_inputs()[0]
-       self.model_dim = input.shape[2]
-       self.input_name = input.name
-       self.labels = parse_labels(self.compiled_model.get_modelmeta().custom_metadata_map['names'])
+       def executor_initializer():
+           thread_name = threading.current_thread().name
+           interpreter = compiled_models.pop()
+           self.compiled_models[thread_name] = interpreter
+           print('Runtime initialized on thread {}'.format(thread_name))
+
+       self.executor = concurrent.futures.ThreadPoolExecutor(
+           initializer=executor_initializer,
+           max_workers=len(compiled_models),
+           thread_name_prefix="onnx",
+       )
+
+       self.prepareExecutor = concurrent.futures.ThreadPoolExecutor(
+           max_workers=len(compiled_models),
+           thread_name_prefix="onnx-prepare",
+       )
+
+       self.faceDevice = None
+       self.textDevice = None
+       asyncio.ensure_future(self.prepareRecognitionModels(), loop=self.loop)

+   async def prepareRecognitionModels(self):
+       try:
+           devices = [
+               {
+                   "nativeId": "facerecognition",
+                   "type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
+                   "interfaces": [
+                       scrypted_sdk.ScryptedInterface.ObjectDetection.value,
+                   ],
+                   "name": "ONNX Face Recognition",
+               },
+           ]
+
+           if ONNXTextRecognition:
+               devices.append(
+                   {
+                       "nativeId": "textrecognition",
+                       "type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
+                       "interfaces": [
+                           scrypted_sdk.ScryptedInterface.ObjectDetection.value,
+                       ],
+                       "name": "ONNX Text Recognition",
+                   },
+               )
+
+           await scrypted_sdk.deviceManager.onDevicesChanged(
+               {
+                   "devices": devices,
+               }
+           )
+       except:
+           pass
+
+   async def getDevice(self, nativeId: str) -> Any:
+       if nativeId == "facerecognition":
+           self.faceDevice = self.faceDevice or ONNXFaceRecognition(self, nativeId)
+           return self.faceDevice
+       elif nativeId == "textrecognition":
+           self.textDevice = self.textDevice or ONNXTextRecognition(self, nativeId)
+           return self.textDevice
+       raise Exception("unknown device")

    async def getSettings(self) -> list[Setting]:
        model = self.storage.getItem("model") or "Default"
+       deviceIds = self.storage.getItem("deviceIds") or '["0"]'
+       deviceIds = json.loads(deviceIds)

        return [
            {
                "key": "model",
@@ -102,9 +190,26 @@
                "choices": availableModels,
                "value": model,
            },
+           {
+               "key": "deviceIds",
+               "title": "Device IDs",
+               "description": "Optional: Assign multiple CUDA Device IDs to use for detection.",
+               "choices": deviceIds,
+               "combobox": True,
+               "multiple": True,
+               "value": deviceIds,
+           },
            {
                "key": "execution_device",
                "title": "Execution Device",
                "readonly": True,
                "value": onnxruntime.get_device(),
            }
        ]

    async def putSetting(self, key: str, value: SettingValue):
+       if (key == 'deviceIds'):
+           value = json.dumps(value)
        self.storage.setItem(key, value)
        await self.onDeviceEvent(scrypted_sdk.ScryptedInterface.Settings.value, None)
        self.requestRestart()
@@ -117,25 +222,32 @@ class ONNXPlugin(
        return [self.model_dim, self.model_dim]

    async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
-       def predict(input_tensor):
-           output_tensors = self.compiled_model.run(None, { self.input_name: input_tensor })
-           objs = yolo.parse_yolov9(output_tensors[0][0])
-           return objs
+       def prepare():
+           im = np.array(input)
+           im = np.expand_dims(input, axis=0)
+           im = im.transpose((0, 3, 1, 2))  # BHWC to BCHW, (n, 3, h, w)
+           im = im.astype(np.float32) / 255.0
+           im = np.ascontiguousarray(im)  # contiguous
+           return im

-       im = np.array(input)
-       im = np.stack([input])
-       im = im.transpose((0, 3, 1, 2))  # BHWC to BCHW, (n, 3, h, w)
-       im = im.astype(np.float32) / 255.0
-       im = np.ascontiguousarray(im)  # contiguous
-       input_tensor = im
+       def predict(input_tensor):
+           compiled_model = self.compiled_models[threading.current_thread().name]
+           output_tensors = compiled_model.run(None, { self.input_name: input_tensor })
+           if self.scrypted_yolov10:
+               return yolo.parse_yolov10(output_tensors[0][0])
+           if self.scrypted_yolo_nas:
+               return yolo.parse_yolo_nas([output_tensors[1], output_tensors[0]])
+           return yolo.parse_yolov9(output_tensors[0][0])

        try:
+           input_tensor = await asyncio.get_event_loop().run_in_executor(
+               self.prepareExecutor, lambda: prepare()
+           )
            objs = await asyncio.get_event_loop().run_in_executor(
-               predictExecutor, lambda: predict(input_tensor)
+               self.executor, lambda: predict(input_tensor)
            )
        except:
            import traceback

            traceback.print_exc()
            raise
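The ONNX plugin now builds one InferenceSession per configured CUDA device and pins each session to its own worker thread via the ThreadPoolExecutor initializer, so concurrent detections fan out across devices without a lock. A condensed sketch of that thread-affinity pattern (make_session stands in for the InferenceSession construction):

    import concurrent.futures
    import threading

    def make_session(device_id: str):
        # stand-in for onnxruntime.InferenceSession(..., providers=[("CUDAExecutionProvider", {"device_id": int(device_id)})])
        return f"session-on-device-{device_id}"

    sessions = [make_session(d) for d in ("0", "1")]
    session_by_thread = {}

    def initializer():
        # each worker thread claims exactly one session when it starts
        session_by_thread[threading.current_thread().name] = sessions.pop()

    executor = concurrent.futures.ThreadPoolExecutor(
        initializer=initializer,
        max_workers=len(sessions),  # one thread per session
        thread_name_prefix="onnx",
    )

    def predict(tensor):
        # runs on a worker thread; use the session bound to this thread
        session = session_by_thread[threading.current_thread().name]
        return (session, tensor)  # a real session would call session.run(...)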
112 plugins/onnx/src/ort/face_recognition.py (new file)
@@ -0,0 +1,112 @@
from __future__ import annotations

import asyncio
import concurrent.futures
import platform
import sys
import threading

import numpy as np
import onnxruntime
from PIL import Image

from predict.face_recognize import FaceRecognizeDetection


class ONNXFaceRecognition(FaceRecognizeDetection):
    def __init__(self, plugin, nativeId: str | None = None):
        self.plugin = plugin

        super().__init__(nativeId=nativeId)

    def downloadModel(self, model: str):
        onnxmodel = "best" if "scrypted" in model else model
        model_version = "v1"
        onnxfile = self.downloadFile(
            f"https://raw.githubusercontent.com/koush/onnx-models/main/{model}/{onnxmodel}.onnx",
            f"{model_version}/{model}/{onnxmodel}.onnx",
        )
        print(onnxfile)

        compiled_models_array = []
        compiled_models = {}
        deviceIds = self.plugin.deviceIds

        for deviceId in deviceIds:
            sess_options = onnxruntime.SessionOptions()

            providers: list[str] = []
            if sys.platform == "darwin":
                providers.append("CoreMLExecutionProvider")

            if "linux" in sys.platform and platform.machine() == "x86_64":
                deviceId = int(deviceId)
                providers.append(("CUDAExecutionProvider", {"device_id": deviceId}))

            providers.append("CPUExecutionProvider")

            compiled_model = onnxruntime.InferenceSession(
                onnxfile, sess_options=sess_options, providers=providers
            )
            compiled_models_array.append(compiled_model)

            input = compiled_model.get_inputs()[0]
            input_name = input.name

        def executor_initializer():
            thread_name = threading.current_thread().name
            interpreter = compiled_models_array.pop()
            compiled_models[thread_name] = interpreter
            print("Runtime initialized on thread {}".format(thread_name))

        executor = concurrent.futures.ThreadPoolExecutor(
            initializer=executor_initializer,
            max_workers=len(compiled_models_array),
            thread_name_prefix="face",
        )

        prepareExecutor = concurrent.futures.ThreadPoolExecutor(
            max_workers=len(compiled_models_array),
            thread_name_prefix="face-prepare",
        )

        return compiled_models, input_name, prepareExecutor, executor

    async def predictDetectModel(self, input: Image.Image):
        compiled_models, input_name, prepareExecutor, executor = self.detectModel

        def prepare():
            im = np.array(input)
            im = np.expand_dims(input, axis=0)
            im = im.transpose((0, 3, 1, 2))  # BHWC to BCHW, (n, 3, h, w)
            im = im.astype(np.float32) / 255.0
            im = np.ascontiguousarray(im)  # contiguous
            return im

        def predict(input_tensor):
            compiled_model = compiled_models[threading.current_thread().name]
            output_tensors = compiled_model.run(None, {input_name: input_tensor})
            return output_tensors

        input_tensor = await asyncio.get_event_loop().run_in_executor(
            prepareExecutor, lambda: prepare()
        )
        objs = await asyncio.get_event_loop().run_in_executor(
            executor, lambda: predict(input_tensor)
        )

        return objs[0][0]

    async def predictFaceModel(self, input: np.ndarray):
        compiled_models, input_name, prepareExecutor, executor = self.faceModel

        def predict():
            compiled_model = compiled_models[threading.current_thread().name]
            output_tensors = compiled_model.run(None, {input_name: input})
            return output_tensors

        objs = await asyncio.get_event_loop().run_in_executor(
            executor, lambda: predict()
        )

        return objs[0]
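downloadModel() above pins one InferenceSession per worker thread via the executor initializer, so predict() can fetch its session by thread name without locking. A minimal sketch of that pattern with plain objects standing in for sessions (all names here are illustrative):

import concurrent.futures
import threading

pool = [f"session-{i}" for i in range(2)]  # stand-ins for InferenceSession objects
by_thread = {}


def initializer():
    # each worker claims one session at startup, keyed by its thread name
    by_thread[threading.current_thread().name] = pool.pop()


executor = concurrent.futures.ThreadPoolExecutor(
    initializer=initializer, max_workers=2, thread_name_prefix="face"
)


def predict(x):
    # no lock needed: a thread only ever reads the session it claimed
    return by_thread[threading.current_thread().name], x


print([f.result() for f in [executor.submit(predict, i) for i in range(4)]])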
102  plugins/onnx/src/ort/text_recognition.py  Normal file
@@ -0,0 +1,102 @@
from __future__ import annotations

import asyncio
import concurrent.futures
import platform
import sys
import threading

import numpy as np
import onnxruntime
from PIL import Image

from predict.text_recognize import TextRecognition


class ONNXTextRecognition(TextRecognition):
    def __init__(self, plugin, nativeId: str | None = None):
        self.plugin = plugin

        super().__init__(nativeId=nativeId)

    def downloadModel(self, model: str):
        onnxmodel = model
        model_version = "v3"
        onnxfile = self.downloadFile(
            f"https://raw.githubusercontent.com/koush/onnx-models/main/{model}/{onnxmodel}.onnx",
            f"{model_version}/{model}/{onnxmodel}.onnx",
        )
        print(onnxfile)

        compiled_models_array = []
        compiled_models = {}
        deviceIds = self.plugin.deviceIds

        for deviceId in deviceIds:
            sess_options = onnxruntime.SessionOptions()

            providers: list[str] = []
            if sys.platform == "darwin":
                providers.append("CoreMLExecutionProvider")

            if "linux" in sys.platform and platform.machine() == "x86_64":
                deviceId = int(deviceId)
                providers.append(("CUDAExecutionProvider", {"device_id": deviceId}))

            providers.append("CPUExecutionProvider")

            compiled_model = onnxruntime.InferenceSession(
                onnxfile, sess_options=sess_options, providers=providers
            )
            compiled_models_array.append(compiled_model)

            input = compiled_model.get_inputs()[0]
            input_name = input.name

        def executor_initializer():
            thread_name = threading.current_thread().name
            interpreter = compiled_models_array.pop()
            compiled_models[thread_name] = interpreter
            print("Runtime initialized on thread {}".format(thread_name))

        executor = concurrent.futures.ThreadPoolExecutor(
            initializer=executor_initializer,
            max_workers=len(compiled_models_array),
            thread_name_prefix="text",
        )

        prepareExecutor = concurrent.futures.ThreadPoolExecutor(
            max_workers=len(compiled_models_array),
            thread_name_prefix="text-prepare",
        )

        return compiled_models, input_name, prepareExecutor, executor

    async def predictDetectModel(self, input: Image.Image):
        compiled_models, input_name, prepareExecutor, executor = self.detectModel

        def predict():
            compiled_model = compiled_models[threading.current_thread().name]
            output_tensors = compiled_model.run(None, {input_name: input})
            return output_tensors

        objs = await asyncio.get_event_loop().run_in_executor(
            executor, lambda: predict()
        )

        return objs[0]

    async def predictTextModel(self, input: np.ndarray):
        input = input.astype(np.float32)
        compiled_models, input_name, prepareExecutor, executor = self.textModel

        def predict():
            compiled_model = compiled_models[threading.current_thread().name]
            output_tensors = compiled_model.run(None, {input_name: input})
            return output_tensors

        objs = await asyncio.get_event_loop().run_in_executor(
            executor, lambda: predict()
        )

        return objs[0]
1  plugins/onnx/src/requirements.optional.txt  Normal file
@@ -0,0 +1 @@
opencv-python
@@ -4,6 +4,7 @@
onnxruntime-gpu; 'linux' in sys_platform and platform_machine == 'x86_64'
# cpu and coreml execution provider
onnxruntime; 'linux' not in sys_platform or platform_machine != 'x86_64'
# nightly?
# ort-nightly-gpu==1.17.3.dev20240409002

# pillow-simd is available on x64 linux
4  plugins/openvino/.vscode/settings.json  vendored
@@ -4,6 +4,10 @@
    // "scrypted.debugHost": "koushik-ubuntu",
    // "scrypted.serverRoot": "/server",

+   // proxmox installation
+   // "scrypted.debugHost": "scrypted-server",
+   // "scrypted.serverRoot": "/root/.scrypted",
+
    // pi local installation
    // "scrypted.debugHost": "192.168.2.119",
    // "scrypted.serverRoot": "/home/pi/.scrypted",
4  plugins/openvino/package-lock.json  generated
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/openvino",
- "version": "0.1.80",
+ "version": "0.1.89",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/openvino",
-     "version": "0.1.80",
+     "version": "0.1.89",
      "devDependencies": {
        "@scrypted/sdk": "file:../../sdk"
      }

@@ -42,5 +42,5 @@
  "devDependencies": {
    "@scrypted/sdk": "file:../../sdk"
  },
- "version": "0.1.80"
+ "version": "0.1.89"
}
@@ -6,8 +6,54 @@ from predict.rectangle import Rectangle

defaultThreshold = .2

-def parse_yolov9(results, threshold = defaultThreshold, scale = None, confidence_scale = None):
+def parse_yolov10(results, threshold = defaultThreshold, scale = None, confidence_scale = None):
    objs: list[Prediction] = []
    keep = np.argwhere(results[4:] > threshold)
    for indices in keep:
        class_id = indices[0]
        index = indices[1]
        confidence = results[class_id + 4, index].astype(float)
        l = results[0][index].astype(float)
        t = results[1][index].astype(float)
        r = results[2][index].astype(float)
        b = results[3][index].astype(float)
        if scale:
            l = scale(l)
            t = scale(t)
            r = scale(r)
            b = scale(b)
        if confidence_scale:
            confidence = confidence_scale(confidence)
        obj = Prediction(
            int(class_id),
            confidence,
            Rectangle(
                l,
                t,
                r,
                b,
            ),
        )
        objs.append(obj)

    return objs

def parse_yolo_nas(predictions):
    objs = []
    for pred_scores, pred_bboxes in zip(*predictions):
        i, j = np.nonzero(pred_scores > .5)
        pred_bboxes = pred_bboxes[i]
        pred_cls_conf = pred_scores[i, j]
        pred_cls_label = j[:]
        for box, conf, label in zip(pred_bboxes, pred_cls_conf, pred_cls_label):
            obj = Prediction(
                int(label), conf.astype(float), Rectangle(box[0].astype(float), box[1].astype(float), box[2].astype(float), box[3].astype(float))
            )
            objs.append(obj)
    return objs

def parse_yolov9(results, threshold = defaultThreshold, scale = None, confidence_scale = None):
    objs: list[Prediction] = []
    keep = np.argwhere(results[4:] > threshold)
    for indices in keep:
        class_id = indices[0]
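For the scrypted YOLO heads parsed above, the raw output is laid out as four box rows followed by one row per class, which is what the np.argwhere filter walks. A hedged numpy illustration on dummy data (not a real model output):

import numpy as np

results = np.zeros((4 + 80, 8400), dtype=np.float32)  # 4 box rows + 80 class rows
results[0:4, 123] = [10, 20, 110, 220]                # l, t, r, b for anchor 123
results[4 + 15, 123] = 0.9                            # class 15 clears the threshold

keep = np.argwhere(results[4:] > 0.2)                 # rows of (class_id, anchor_index)
for class_id, index in keep:
    print(int(class_id), float(results[class_id + 4, index]), results[0:4, index])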
@@ -1,8 +1,10 @@
from __future__ import annotations

+import asyncio
+import concurrent.futures
import json
import re
import traceback
from typing import Any, Tuple

import numpy as np
@@ -11,30 +13,30 @@ import scrypted_sdk
from PIL import Image
from scrypted_sdk.other import SettingValue
from scrypted_sdk.types import Setting
-import concurrent.futures

import common.yolo as yolo
from predict import Prediction, PredictPlugin
from predict.rectangle import Rectangle

+from .face_recognition import OpenVINOFaceRecognition
+
+try:
+    from .text_recognition import OpenVINOTextRecognition
+except:
+    OpenVINOTextRecognition = None

predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "OpenVINO-Predict")
prepareExecutor = concurrent.futures.ThreadPoolExecutor(1, "OpenVINO-Prepare")

availableModels = [
    "Default",
    "scrypted_yolov10m_320",
    "scrypted_yolov10n_320",
    "scrypted_yolo_nas_s_320",
    "scrypted_yolov6n_320",
    "scrypted_yolov6n",
    "scrypted_yolov6s_320",
    "scrypted_yolov6s",
    "scrypted_yolov9c_320",
    "scrypted_yolov9c",
    "scrypted_yolov8n_320",
    "scrypted_yolov8n",
    "ssd_mobilenet_v1_coco",
    "ssdlite_mobilenet_v2",
    "yolo-v3-tiny-tf",
@@ -121,10 +123,10 @@ class OpenVINOPlugin(
        if using_mode == "AUTO":
            if "GPU" in available_devices:
                using_mode = "GPU"
-       if using_mode == "GPU":
-           precision = "FP16"
-       else:
-           precision = "FP32"
+
+       # FP16 is smaller and the default export. no tangible performance difference.
+       # https://docs.openvino.ai/2023.3/openvino_docs_OV_Converter_UG_Conversion_Options.html
+       precision = "FP16"

        self.precision = precision

@@ -134,6 +136,8 @@ class OpenVINOPlugin(
            self.storage.setItem("model", "Default")
            model = "scrypted_yolov8n_320"
        self.yolo = "yolo" in model
+       self.scrypted_yolov10 = "scrypted_yolov10" in model
+       self.scrypted_yolo_nas = "scrypted_yolo_nas" in model
        self.scrypted_yolo = "scrypted_yolo" in model
        self.scrypted_model = "scrypted" in model
        self.sigmoid = model == "yolo-v4-tiny-tf"
@@ -151,7 +155,12 @@ class OpenVINOPlugin(
            f"https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{ovmodel}.bin",
            f"{model_version}/{model}/{precision}/{ovmodel}.bin",
        )
-       if self.scrypted_model:
+       if self.scrypted_yolo_nas:
+           labelsFile = self.downloadFile(
+               "https://raw.githubusercontent.com/koush/openvino-models/main/scrypted_nas_labels.txt",
+               "scrypted_nas_labels.txt",
+           )
+       elif self.scrypted_model:
            labelsFile = self.downloadFile(
                "https://raw.githubusercontent.com/koush/openvino-models/main/scrypted_labels.txt",
                "scrypted_labels.txt",
@@ -194,6 +203,8 @@ class OpenVINOPlugin(
        labels_contents = open(labelsFile, "r").read()
        self.labels = parse_label_contents(labels_contents)

+       self.faceDevice = None
+       self.textDevice = None
        asyncio.ensure_future(self.prepareRecognitionModels(), loop=self.loop)

    async def getSettings(self) -> list[Setting]:
@@ -262,8 +273,11 @@ class OpenVINOPlugin(
        objs = []

        if self.scrypted_yolo:
-           objs = yolo.parse_yolov9(output_tensors[0][0])
-           return objs
+           if self.scrypted_yolov10:
+               return yolo.parse_yolov10(output_tensors[0][0])
+           if self.scrypted_yolo_nas:
+               return yolo.parse_yolo_nas([output_tensors[1], output_tensors[0]])
+           return yolo.parse_yolov9(output_tensors[0][0])

        if self.yolo:
            # index 2 will always either be 13 or 26
@@ -314,30 +328,34 @@ class OpenVINOPlugin(

        return objs

-       # the input_tensor can be created with the shared_memory=True parameter,
-       # but that seems to cause issues on some platforms.
-       if self.scrypted_yolo:
-           im = np.stack([input])
-           im = im.transpose((0, 3, 1, 2))  # BHWC to BCHW, (n, 3, h, w)
-           im = im.astype(np.float32) / 255.0
-           im = np.ascontiguousarray(im)  # contiguous
-           im = ov.Tensor(array=im)
-           input_tensor = im
-       elif self.yolo:
-           input_tensor = ov.Tensor(
-               array=np.expand_dims(np.array(input), axis=0).astype(np.float32)
-           )
-       else:
-           input_tensor = ov.Tensor(array=np.expand_dims(np.array(input), axis=0))
+       def prepare():
+           # the input_tensor can be created with the shared_memory=True parameter,
+           # but that seems to cause issues on some platforms.
+           if self.scrypted_yolo:
+               im = np.array(input)
+               im = np.expand_dims(input, axis=0)
+               im = im.transpose((0, 3, 1, 2))  # BHWC to BCHW, (n, 3, h, w)
+               im = im.astype(np.float32) / 255.0
+               im = np.ascontiguousarray(im)  # contiguous
+               input_tensor = ov.Tensor(array=im)
+           elif self.yolo:
+               input_tensor = ov.Tensor(
+                   array=np.expand_dims(np.array(input), axis=0).astype(np.float32)
+               )
+           else:
+               input_tensor = ov.Tensor(array=np.expand_dims(np.array(input), axis=0))
+           return input_tensor

        try:
            input_tensor = await asyncio.get_event_loop().run_in_executor(
                prepareExecutor, lambda: prepare()
            )
            objs = await asyncio.get_event_loop().run_in_executor(
                predictExecutor, lambda: predict(input_tensor)
            )
        except:
            import traceback

            traceback.print_exc()
            raise
@@ -379,7 +397,9 @@ class OpenVINOPlugin(

    async def getDevice(self, nativeId: str) -> Any:
        if nativeId == "facerecognition":
-           return OpenVINOFaceRecognition(self, nativeId)
+           self.faceDevice = self.faceDevice or OpenVINOFaceRecognition(self, nativeId)
+           return self.faceDevice
        elif nativeId == "textrecognition":
-           return OpenVINOTextRecognition(self, nativeId)
+           self.textDevice = self.textDevice or OpenVINOTextRecognition(self, nativeId)
+           return self.textDevice
        raise Exception("unknown device")
7  plugins/openvino/src/ov/async_infer.py  Normal file
@@ -0,0 +1,7 @@
import concurrent.futures


def create_executors(name: str):
    prepare = concurrent.futures.ThreadPoolExecutor(1, f"OpenVINO-{name}Prepare")
    predict = concurrent.futures.ThreadPoolExecutor(1, f"OpenVINO-{name}Predict")
    return prepare, predict
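Each model gets its own single-thread prepare/predict executor pair so one model's inference never blocks another's. A sketch of how a pair is consumed (preprocess and blocking_infer are placeholders, not plugin functions):

import asyncio

from ov import async_infer

prepare_executor, predict_executor = async_infer.create_executors("FaceDetect")


async def run(frame):
    loop = asyncio.get_event_loop()
    tensor = await loop.run_in_executor(prepare_executor, lambda: preprocess(frame))     # placeholder
    return await loop.run_in_executor(predict_executor, lambda: blocking_infer(tensor))  # placeholder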
@@ -1,23 +1,19 @@
from __future__ import annotations

-import concurrent.futures
-import openvino.runtime as ov
+import asyncio

import numpy as np
import openvino.runtime as ov
from PIL import Image

+from ov import async_infer
from predict.face_recognize import FaceRecognizeDetection

+faceDetectPrepare, faceDetectPredict = async_infer.create_executors("FaceDetect")
+faceRecognizePrepare, faceRecognizePredict = async_infer.create_executors(
+    "FaceRecognize"
+)

-def euclidean_distance(arr1, arr2):
-    return np.linalg.norm(arr1 - arr2)
-
-
-def cosine_similarity(vector_a, vector_b):
-    dot_product = np.dot(vector_a, vector_b)
-    norm_a = np.linalg.norm(vector_a)
-    norm_b = np.linalg.norm(vector_b)
-    similarity = dot_product / (norm_a * norm_b)
-    return similarity
-

class OpenVINOFaceRecognition(FaceRecognizeDetection):
    def __init__(self, plugin, nativeId: str | None = None):
@@ -40,32 +36,35 @@ class OpenVINOFaceRecognition(FaceRecognizeDetection):
        print(xmlFile, binFile)
        return self.plugin.core.compile_model(xmlFile, self.plugin.mode)

-    def predictDetectModel(self, input):
-        infer_request = self.detectModel.create_infer_request()
-        im = np.stack([input])
-        im = im.transpose((0, 3, 1, 2))  # BHWC to BCHW, (n, 3, h, w)
-        im = im.astype(np.float32) / 255.0
-        im = np.ascontiguousarray(im)  # contiguous
-        im = ov.Tensor(array=im)
-        input_tensor = im
-        infer_request.set_input_tensor(input_tensor)
-        infer_request.start_async()
-        infer_request.wait()
-        return infer_request.output_tensors[0].data[0]
-
-    def predictFaceModel(self, input):
-        im = ov.Tensor(array=input)
-        infer_request = self.faceModel.create_infer_request()
-        infer_request.set_input_tensor(im)
-        infer_request.start_async()
-        infer_request.wait()
-        return infer_request.output_tensors[0].data[0]
-
-    def predictTextModel(self, input):
-        input = input.astype(np.float32)
-        im = ov.Tensor(array=input)
-        infer_request = self.textModel.create_infer_request()
-        infer_request.set_input_tensor(im)
-        infer_request.start_async()
-        infer_request.wait()
-        return infer_request.output_tensors[0].data
+    async def predictDetectModel(self, input: Image.Image):
+        def predict():
+            im = np.expand_dims(input, axis=0)
+            im = im.transpose((0, 3, 1, 2))  # BHWC to BCHW, (n, 3, h, w)
+            im = im.astype(np.float32) / 255.0
+            im = np.ascontiguousarray(im)  # contiguous
+
+            infer_request = self.detectModel.create_infer_request()
+            tensor = ov.Tensor(array=im)
+            infer_request.set_input_tensor(tensor)
+            output_tensors = infer_request.infer()
+            ret = output_tensors[0][0]
+            return ret
+
+        ret = await asyncio.get_event_loop().run_in_executor(
+            faceDetectPredict, lambda: predict()
+        )
+        return ret
+
+    async def predictFaceModel(self, input: np.ndarray):
+        def predict():
+            im = ov.Tensor(array=input)
+            infer_request = self.faceModel.create_infer_request()
+            infer_request.set_input_tensor(im)
+            output_tensors = infer_request.infer()
+            ret = output_tensors[0]
+            return ret
+
+        ret = await asyncio.get_event_loop().run_in_executor(
+            faceRecognizePredict, lambda: predict()
+        )
+        return ret
@@ -1,10 +1,18 @@
from __future__ import annotations

-import openvino.runtime as ov
-import numpy as np
+import asyncio

+import numpy as np
+import openvino.runtime as ov

+from ov import async_infer
from predict.text_recognize import TextRecognition

+textDetectPrepare, textDetectPredict = async_infer.create_executors("TextDetect")
+textRecognizePrepare, textRecognizePredict = async_infer.create_executors(
+    "TextRecognize"
+)


class OpenVINOTextRecognition(TextRecognition):
    def __init__(self, plugin, nativeId: str | None = None):
|
||||
print(xmlFile, binFile)
|
||||
return self.plugin.core.compile_model(xmlFile, self.plugin.mode)
|
||||
|
||||
def predictDetectModel(self, input):
|
||||
infer_request = self.detectModel.create_infer_request()
|
||||
im = ov.Tensor(array=input)
|
||||
input_tensor = im
|
||||
infer_request.set_input_tensor(input_tensor)
|
||||
infer_request.start_async()
|
||||
infer_request.wait()
|
||||
return infer_request.output_tensors[0].data
|
||||
async def predictDetectModel(self, input: np.ndarray):
|
||||
def predict():
|
||||
infer_request = self.detectModel.create_infer_request()
|
||||
im = ov.Tensor(array=input)
|
||||
input_tensor = im
|
||||
infer_request.set_input_tensor(input_tensor)
|
||||
output_tensors = infer_request.infer()
|
||||
ret = output_tensors[0]
|
||||
return ret
|
||||
|
||||
def predictTextModel(self, input):
|
||||
input = input.astype(np.float32)
|
||||
im = ov.Tensor(array=input)
|
||||
infer_request = self.textModel.create_infer_request()
|
||||
infer_request.set_input_tensor(im)
|
||||
infer_request.start_async()
|
||||
infer_request.wait()
|
||||
return infer_request.output_tensors[0].data
|
||||
ret = await asyncio.get_event_loop().run_in_executor(
|
||||
textDetectPredict, lambda: predict()
|
||||
)
|
||||
return ret
|
||||
|
||||
async def predictTextModel(self, input: np.ndarray):
|
||||
def predict():
|
||||
im = ov.Tensor(array=input.astype(np.float32))
|
||||
infer_request = self.textModel.create_infer_request()
|
||||
infer_request.set_input_tensor(im)
|
||||
output_tensors = infer_request.infer()
|
||||
ret = output_tensors[0]
|
||||
return ret
|
||||
|
||||
ret = await asyncio.get_event_loop().run_in_executor(
|
||||
textDetectPredict, lambda: predict()
|
||||
)
|
||||
return ret
|
||||
|
||||
@@ -1,5 +1,4 @@
-# 2024-04-23 - modify timestamp to force pip reinstall
-openvino==2024.0.0
+openvino==2024.1.0

# pillow-simd is available on x64 linux
# pillow-simd confirmed not building with arm64 linux or apple silicon
4  plugins/prebuffer-mixin/package-lock.json  generated
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/prebuffer-mixin",
- "version": "0.10.18",
+ "version": "0.10.23",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/prebuffer-mixin",
-     "version": "0.10.18",
+     "version": "0.10.23",
      "license": "Apache-2.0",
      "dependencies": {
        "@scrypted/common": "file:../../common",

@@ -1,6 +1,6 @@
{
  "name": "@scrypted/prebuffer-mixin",
- "version": "0.10.18",
+ "version": "0.10.23",
  "description": "Video Stream Rebroadcast, Prebuffer, and Management Plugin for Scrypted.",
  "author": "Scrypted",
  "license": "Apache-2.0",
@@ -1,15 +1,13 @@
-import { cloneDeep } from '@scrypted/common/src/clone-deep';
-import { Deferred } from "@scrypted/common/src/deferred";
-import { listenZeroSingleClient } from '@scrypted/common/src/listen-cluster';
-import { ffmpegLogInitialOutput, safeKillFFmpeg, safePrintFFmpegArguments } from '@scrypted/common/src/media-helpers';
-import { createRtspParser } from "@scrypted/common/src/rtsp-server";
-import { parseSdp } from "@scrypted/common/src/sdp-utils";
-import { StreamChunk, StreamParser } from '@scrypted/common/src/stream-parser';
import sdk, { FFmpegInput, RequestMediaStreamOptions, ResponseMediaStreamOptions } from "@scrypted/sdk";
import child_process, { ChildProcess, StdioOptions } from 'child_process';
import { EventEmitter } from 'events';
import { Server } from 'net';
import { Duplex } from 'stream';
+import { cloneDeep } from './clone-deep';
+import { Deferred } from "./deferred";
+import { listenZeroSingleClient } from './listen-cluster';
+import { ffmpegLogInitialOutput, safeKillFFmpeg, safePrintFFmpegArguments } from './media-helpers';
+import { createRtspParser } from "./rtsp-server";
+import { parseSdp } from "./sdp-utils";
+import { StreamChunk, StreamParser } from './stream-parser';

const { mediaManager } = sdk;
@@ -339,64 +337,3 @@ export async function startParserSession<T extends string>(ffmpegInput: FFmpegIn
        }
    };
}
-
-export interface Rebroadcaster {
-    server: Server;
-    port: number;
-    url: string;
-    clients: number;
-}
-
-export interface RebroadcastSessionCleanup {
-    (): void;
-}
-
-export interface RebroadcasterConnection {
-    writeData: (data: StreamChunk) => number;
-    destroy: () => void;
-}
-
-export interface RebroadcasterOptions {
-    connect?: (connection: RebroadcasterConnection) => RebroadcastSessionCleanup | undefined;
-    console?: Console;
-    idle?: {
-        timeout: number,
-        callback: () => void,
-    },
-}
-
-export function handleRebroadcasterClient(socket: Duplex, options?: RebroadcasterOptions) {
-    const firstWriteData = (data: StreamChunk) => {
-        if (data.startStream) {
-            socket.write(data.startStream)
-        }
-        connection.writeData = writeData;
-        return writeData(data);
-    }
-    const writeData = (data: StreamChunk) => {
-        for (const chunk of data.chunks) {
-            socket.write(chunk);
-        }
-
-        return socket.writableLength;
-    };
-
-    const destroy = () => {
-        const cb = cleanupCallback;
-        cleanupCallback = undefined;
-        socket.destroy();
-        cb?.();
-    }
-
-    const connection: RebroadcasterConnection = {
-        writeData: firstWriteData,
-        destroy,
-    };
-
-    let cleanupCallback = options?.connect(connection);
-
-    socket.once('close', () => {
-        destroy();
-    });
-    socket.on('error', e => options?.console?.log('client stream ended'));
-}
@@ -1,8 +1,6 @@
-import path from 'path'
import { AutoenableMixinProvider } from '@scrypted/common/src/autoenable-mixin-provider';
import { getDebugModeH264EncoderArgs, getH264EncoderArgs } from '@scrypted/common/src/ffmpeg-hardware-acceleration';
import { addVideoFilterArguments } from '@scrypted/common/src/ffmpeg-helpers';
-import { ParserOptions, ParserSession, handleRebroadcasterClient, startParserSession } from '@scrypted/common/src/ffmpeg-rebroadcast';
import { ListenZeroSingleClientTimeoutError, closeQuiet, listenZeroSingleClient } from '@scrypted/common/src/listen-cluster';
import { readLength } from '@scrypted/common/src/read-stream';
import { H264_NAL_TYPE_FU_B, H264_NAL_TYPE_IDR, H264_NAL_TYPE_MTAP16, H264_NAL_TYPE_MTAP32, H264_NAL_TYPE_RESERVED0, H264_NAL_TYPE_RESERVED30, H264_NAL_TYPE_RESERVED31, H264_NAL_TYPE_SEI, H264_NAL_TYPE_STAP_B, RtspServer, RtspTrack, createRtspParser, findH264NaluType, getNaluTypes, listenSingleRtspClient } from '@scrypted/common/src/rtsp-server';
@@ -10,14 +8,16 @@ import { addTrackControls, parseSdp } from '@scrypted/common/src/sdp-utils';
import { SettingsMixinDeviceBase, SettingsMixinDeviceOptions } from "@scrypted/common/src/settings-mixin";
import { sleep } from '@scrypted/common/src/sleep';
import { StreamChunk, StreamParser } from '@scrypted/common/src/stream-parser';
-import sdk, { BufferConverter, ChargeState, DeviceProvider, DeviceState, EventListenerRegister, FFmpegInput, H264Info, MediaObject, MediaStreamDestination, MediaStreamOptions, MixinProvider, RequestMediaStreamOptions, ResponseMediaStreamOptions, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting, SettingValue, Settings, VideoCamera, VideoCameraConfiguration, WritableDeviceState } from '@scrypted/sdk';
+import sdk, { BufferConverter, ChargeState, DeviceProvider, EventListenerRegister, FFmpegInput, H264Info, MediaObject, MediaStreamDestination, MediaStreamOptions, MixinProvider, RequestMediaStreamOptions, ResponseMediaStreamOptions, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting, SettingValue, Settings, VideoCamera, VideoCameraConfiguration, WritableDeviceState } from '@scrypted/sdk';
import { StorageSettings } from '@scrypted/sdk/storage-settings';
import crypto from 'crypto';
import { once } from 'events';
import net, { AddressInfo } from 'net';
+import path from 'path';
import semver from 'semver';
import { Duplex } from 'stream';
import { Worker } from 'worker_threads';
+import { ParserOptions, ParserSession, startParserSession } from './ffmpeg-rebroadcast';
import { FileRtspServer } from './file-rtsp-server';
import { getUrlLocalAdresses } from './local-addresses';
import { REBROADCAST_MIXIN_INTERFACE_TOKEN } from './rebroadcast-mixin-token';
@@ -41,13 +41,6 @@ interface PrebufferStreamChunk extends StreamChunk {
    time?: number;
}

-type Prebuffers<T extends string> = {
-    [key in T]: PrebufferStreamChunk[];
-}
-
-type PrebufferParsers = 'rtsp';
-const PrebufferParserValues: PrebufferParsers[] = ['rtsp'];
-
function hasOddities(h264Info: H264Info) {
    const h264Oddities = h264Info.fuab
        || h264Info.mtap16
@@ -60,13 +53,13 @@ function hasOddities(h264Info: H264Info) {
    return h264Oddities;
}

+type PrebufferParsers = 'rtsp';

class PrebufferSession {

    parserSessionPromise: Promise<ParserSession<PrebufferParsers>>;
    parserSession: ParserSession<PrebufferParsers>;
-    prebuffers: Prebuffers<PrebufferParsers> = {
-        rtsp: [],
-    };
+    rtspPrebuffer: PrebufferStreamChunk[] = []
    parsers: { [container: string]: StreamParser };
    sdp: Promise<string>;
    usingScryptedParser = false;
@@ -148,10 +141,10 @@ class PrebufferSession {

    getDetectedIdrInterval() {
        const durations: number[] = [];
-        if (this.prebuffers.rtsp.length) {
+        if (this.rtspPrebuffer.length) {
            let last: number;

-            for (const chunk of this.prebuffers.rtsp) {
+            for (const chunk of this.rtspPrebuffer) {
                if (findH264NaluType(chunk, H264_NAL_TYPE_IDR)) {
                    if (last)
                        durations.push(chunk.time - last);
@@ -176,9 +169,7 @@
    }

    clearPrebuffers() {
-        for (const prebuffer of PrebufferParserValues) {
-            this.prebuffers[prebuffer] = [];
-        }
+        this.rtspPrebuffer = [];
    }

    release() {
@@ -251,7 +242,7 @@

        let total = 0;
        let start = 0;
-        for (const prebuffer of this.prebuffers.rtsp) {
+        for (const prebuffer of this.rtspPrebuffer) {
            start = start || prebuffer.time;
            for (const chunk of prebuffer.chunks) {
                total += chunk.byteLength;
@@ -685,11 +676,10 @@
            session.killed.finally(() => clearTimeout(refreshTimeout));
        }

-        for (const container of PrebufferParserValues) {
        let shifts = 0;
-        let prebufferContainer: PrebufferStreamChunk[] = this.prebuffers[container];
+        let prebufferContainer: PrebufferStreamChunk[] = this.rtspPrebuffer;

-        session.on(container, (chunk: PrebufferStreamChunk) => {
+        session.on('rtsp', (chunk: PrebufferStreamChunk) => {
            const now = Date.now();

            chunk.time = now;
@@ -702,11 +692,10 @@

            if (shifts > 100000) {
                prebufferContainer = prebufferContainer.slice();
-                this.prebuffers[container] = prebufferContainer;
+                this.rtspPrebuffer = prebufferContainer;
                shifts = 0;
            }
        });
-        }

        session.start();
        return session;
@@ -783,19 +772,24 @@
    async handleRebroadcasterClient(options: {
        findSyncFrame: boolean,
        isActiveClient: boolean,
-        container: PrebufferParsers,
        session: ParserSession<PrebufferParsers>,
        socketPromise: Promise<Duplex>,
        requestedPrebuffer: number,
        filter?: (chunk: StreamChunk, prebuffer: boolean) => StreamChunk,
    }) {
-        const { isActiveClient, container, session, socketPromise, requestedPrebuffer } = options;
+        const { isActiveClient, session, socketPromise, requestedPrebuffer } = options;
        this.console.log('sending prebuffer', requestedPrebuffer);

        let socket: Duplex;

        try {
            socket = await socketPromise;

            if (!session.isActive) {
                // session may be killed while waiting for socket.
                socket.destroy();
                throw new Error('session terminated before socket connected');
            }
        }
        catch (e) {
            // in case the client never connects, do an inactivity check.
@@ -820,70 +814,81 @@
            this.inactivityCheck(session, isActiveClient);
        });

-        handleRebroadcasterClient(socket, {
-            // console: this.console,
-            connect: (connection) => {
-                const now = Date.now();
-
-                const safeWriteData = (chunk: StreamChunk, prebuffer?: boolean) => {
-                    if (options.filter) {
-                        chunk = options.filter(chunk, prebuffer);
-                        if (!chunk)
-                            return;
-                    }
-                    const buffered = connection.writeData(chunk);
-                    if (buffered > 100000000) {
-                        this.console.log('more than 100MB has been buffered, did downstream die? killing connection.', this.streamName);
-                        cleanup();
-                    }
-                }
-
-                const cleanup = () => {
-                    session.removeListener(container, safeWriteData);
-                    session.removeListener('killed', cleanup);
-                    connection.destroy();
-                }
-
-                session.on(container, safeWriteData);
-                session.once('killed', cleanup);
-
-                const prebufferContainer: PrebufferStreamChunk[] = this.prebuffers[container];
-                // if the requested container or the source container is not rtsp, use an exact seek.
-                // this works better when the requested container is mp4, and rtsp is the source.
-                // if starting on a sync frame, ffmpeg will skip the first segment while initializing
-                // on live sources like rtsp. the buffer before the sync frame stream will be enough
-                // for ffmpeg to analyze and start up in time for the sync frame.
-                // may be worth considering playing with a few other things to avoid this:
-                // mpeg-ts as a container (would need to write a muxer)
-                // specifying the buffer before the sync frame with probesize.
-                // If h264 oddities are detected, assume ffmpeg will be used.
-                if (container !== 'rtsp' || !options.findSyncFrame || this.getLastH264Oddities()) {
-                    for (const chunk of prebufferContainer) {
-                        if (chunk.time < now - requestedPrebuffer)
-                            continue;
-
-                        safeWriteData(chunk, true);
-                    }
-                }
-                else {
-                    const parser = this.parsers[container];
-                    const filtered = prebufferContainer.filter(pb => pb.time >= now - requestedPrebuffer);
-                    let availablePrebuffers = parser.findSyncFrame(filtered);
-                    if (!availablePrebuffers) {
-                        this.console.warn('Unable to find sync frame in rtsp prebuffer.');
-                        availablePrebuffers = [];
-                    }
-                    else {
-                        this.console.log('Found sync frame in rtsp prebuffer.');
-                    }
-                    for (const prebuffer of availablePrebuffers) {
-                        safeWriteData(prebuffer, true);
-                    }
-                }
-
-                return cleanup;
-            }
-        })
+        let writeData = (data: StreamChunk): number => {
+            if (data.startStream) {
+                socket.write(data.startStream)
+            }
+
+            const writeDataWithoutStartStream = (data: StreamChunk) => {
+                for (const chunk of data.chunks) {
+                    socket.write(chunk);
+                }
+
+                return socket.writableLength;
+            };
+
+            writeData = writeDataWithoutStartStream;
+            return writeDataWithoutStartStream(data);
+        }
+
+        const safeWriteData = (chunk: StreamChunk, prebuffer?: boolean) => {
+            if (options.filter) {
+                chunk = options.filter(chunk, prebuffer);
+                if (!chunk)
+                    return;
+            }
+            const buffered = writeData(chunk);
+            if (buffered > 100000000) {
+                this.console.log('more than 100MB has been buffered, did downstream die? killing connection.', this.streamName);
+                cleanup();
+            }
+        }
+
+        const cleanup = () => {
+            socket.destroy();
+            session.removeListener('rtsp', safeWriteData);
+            session.removeListener('killed', cleanup);
+        };
+
+        session.on('rtsp', safeWriteData);
+        session.once('killed', cleanup);
+
+        socket.once('close', () => {
+            cleanup();
+        });
+
+        // socket.on('error', e => this.console.log('client stream ended'));
+
+        const now = Date.now();
+        const prebufferContainer: PrebufferStreamChunk[] = this.rtspPrebuffer;
+        // if starting on a sync frame, ffmpeg will skip the first segment while initializing
+        // on live sources like rtsp. the buffer before the sync frame stream will be enough
+        // for ffmpeg to analyze and start up in time for the sync frame.
+        // If h264 oddities are detected, assume ffmpeg will be used.
+        if (!options.findSyncFrame || this.getLastH264Oddities()) {
+            for (const chunk of prebufferContainer) {
+                if (chunk.time < now - requestedPrebuffer)
+                    continue;
+
+                safeWriteData(chunk, true);
+            }
+        }
+        else {
+            const parser = this.parsers['rtsp'];
+            const filtered = prebufferContainer.filter(pb => pb.time >= now - requestedPrebuffer);
+            let availablePrebuffers = parser.findSyncFrame(filtered);
+            if (!availablePrebuffers) {
+                this.console.warn('Unable to find sync frame in rtsp prebuffer.');
+                availablePrebuffers = [];
+            }
+            else {
+                this.console.log('Found sync frame in rtsp prebuffer.');
+            }
+            for (const prebuffer of availablePrebuffers) {
+                safeWriteData(prebuffer, true);
+            }
+        }
    }

    async getVideoStream(findSyncFrame: boolean, options?: RequestMediaStreamOptions) {
@@ -1010,8 +1015,6 @@ class PrebufferSession {
            urls = await getUrlLocalAdresses(this.console, url);
        }

-        const container = 'rtsp';
-
        mediaStreamOptions.sdp = sdp;

        const isActiveClient = options?.refresh !== false;
@@ -1019,7 +1022,6 @@
        this.handleRebroadcasterClient({
            findSyncFrame,
            isActiveClient,
-            container,
            requestedPrebuffer,
            socketPromise,
            session,
@@ -1045,7 +1047,7 @@

        const now = Date.now();
        let available = 0;
-        const prebufferContainer: PrebufferStreamChunk[] = this.prebuffers[container];
+        const prebufferContainer: PrebufferStreamChunk[] = this.rtspPrebuffer;
        for (const prebuffer of prebufferContainer) {
            if (prebuffer.time < now - requestedPrebuffer)
                continue;
@@ -1066,11 +1068,11 @@
        const ffmpegInput: FFmpegInput = {
            url,
            urls,
-            container,
+            container: 'rtsp',
            inputArguments: [
                ...inputArguments,
-                ...(this.parsers[container].inputArguments || []),
-                '-f', this.parsers[container].container,
+                ...(this.parsers['rtsp'].inputArguments || []),
+                '-f', this.parsers['rtsp'].container,
                '-i', url,
            ],
            mediaStreamOptions,
@@ -1165,7 +1167,6 @@ class PrebufferMixin extends SettingsMixinDeviceBase<VideoCamera> implements Vid
        prebufferSession.handleRebroadcasterClient({
            findSyncFrame: true,
            isActiveClient: true,
-            container: 'rtsp',
            session,
            socketPromise: Promise.resolve(client),
            requestedPrebuffer,
@@ -1,14 +1,13 @@
-import { cloneDeep } from "@scrypted/common/src/clone-deep";
-import { ParserOptions, ParserSession, setupActivityTimer } from "@scrypted/common/src/ffmpeg-rebroadcast";
import { read16BELengthLoop } from "@scrypted/common/src/read-stream";
-import { findH264NaluType, H264_NAL_TYPE_SPS, RTSP_FRAME_MAGIC } from "@scrypted/common/src/rtsp-server";
+import { H264_NAL_TYPE_SPS, RTSP_FRAME_MAGIC, findH264NaluType } from "@scrypted/common/src/rtsp-server";
import { parseSdp } from "@scrypted/common/src/sdp-utils";
import { sleep } from "@scrypted/common/src/sleep";
import { StreamChunk } from "@scrypted/common/src/stream-parser";
import { MediaStreamOptions, ResponseMediaStreamOptions } from "@scrypted/sdk";
import { parse as spsParse } from "h264-sps-parser";
import net from 'net';
import { EventEmitter, Readable } from "stream";
+import { ParserSession, setupActivityTimer } from "./ffmpeg-rebroadcast";
import { getSpsResolution } from "./sps-resolution";

export function negotiateMediaStream(sdp: string, mediaStreamOptions: MediaStreamOptions, inputVideoCodec: string, inputAudioCodec: string, requestMediaStream: MediaStreamOptions) {

@@ -1,12 +1,12 @@
-import { ParserSession, setupActivityTimer } from "@scrypted/common/src/ffmpeg-rebroadcast";
-import { closeQuiet, createBindZero } from "@scrypted/common/src/listen-cluster";
-import { findH264NaluType, H264_NAL_TYPE_SPS, parseSemicolonDelimited, RtspClient, RtspClientUdpSetupOptions, RTSP_FRAME_MAGIC } from "@scrypted/common/src/rtsp-server";
+import { closeQuiet } from "@scrypted/common/src/listen-cluster";
+import { H264_NAL_TYPE_SPS, RTSP_FRAME_MAGIC, RtspClient, RtspClientUdpSetupOptions, findH264NaluType, parseSemicolonDelimited } from "@scrypted/common/src/rtsp-server";
import { parseSdp } from "@scrypted/common/src/sdp-utils";
import { StreamChunk } from "@scrypted/common/src/stream-parser";
import { ResponseMediaStreamOptions } from "@scrypted/sdk";
import dgram from 'dgram';
import { parse as spsParse } from "h264-sps-parser";
import { EventEmitter } from "stream";
+import { ParserSession, setupActivityTimer } from "./ffmpeg-rebroadcast";
import { negotiateMediaStream } from "./rfc4571";
import { getSpsResolution } from "./sps-resolution";
4  plugins/ring/package-lock.json  generated
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/ring",
- "version": "0.0.137",
+ "version": "0.0.138",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/ring",
-     "version": "0.0.137",
+     "version": "0.0.138",
      "dependencies": {
        "@koush/ring-client-api": "file:../../external/ring-client-api",
        "@scrypted/common": "file:../../common",

@@ -44,5 +44,5 @@
    "got": "11.8.6",
    "socket.io-client": "^2.5.0"
  },
- "version": "0.0.137"
+ "version": "0.0.138"
}
6  plugins/rknn/package-lock.json  generated
@@ -1,19 +1,19 @@
{
  "name": "@scrypted/rknn",
- "version": "0.0.4",
+ "version": "0.1.1",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/rknn",
-     "version": "0.0.4",
+     "version": "0.1.1",
      "devDependencies": {
        "@scrypted/sdk": "file:../../sdk"
      }
    },
    "../../sdk": {
      "name": "@scrypted/sdk",
-     "version": "0.3.29",
+     "version": "0.3.31",
      "dev": true,
      "license": "ISC",
      "dependencies": {

@@ -39,11 +39,12 @@
    "type": "API",
    "interfaces": [
      "ObjectDetection",
-     "ObjectDetectionPreview"
+     "ObjectDetectionPreview",
+     "DeviceProvider"
    ]
  },
  "devDependencies": {
    "@scrypted/sdk": "file:../../sdk"
  },
- "version": "0.0.4"
+ "version": "0.1.1"
}
0  plugins/rknn/src/det_utils/__init__.py  Normal file

269  plugins/rknn/src/det_utils/db_postprocess.py  Normal file
@@ -0,0 +1,269 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is referred from:
https://github.com/WenmuZhou/DBNet.pytorch/blob/master/post_processing/seg_detector_representer.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import cv2
# import paddle
from shapely.geometry import Polygon
import pyclipper


class DBPostProcess(object):
    """
    The post process for Differentiable Binarization (DB).
    """

    def __init__(self,
                 thresh=0.3,
                 box_thresh=0.7,
                 max_candidates=1000,
                 unclip_ratio=2.0,
                 use_dilation=False,
                 score_mode="fast",
                 **kwargs):
        self.thresh = thresh
        self.box_thresh = box_thresh
        self.max_candidates = max_candidates
        self.unclip_ratio = unclip_ratio
        self.min_size = 3
        self.score_mode = score_mode
        assert score_mode in [
            "slow", "fast"
        ], "Score mode must be in [slow, fast] but got: {}".format(score_mode)

        self.dilation_kernel = None if not use_dilation else np.array(
            [[1, 1], [1, 1]])

    def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
        '''
        _bitmap: single map with shape (1, H, W),
            whose values are binarized as {0, 1}
        '''

        bitmap = _bitmap
        height, width = bitmap.shape

        outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        if len(outs) == 3:
            img, contours, _ = outs[0], outs[1], outs[2]
        elif len(outs) == 2:
            contours, _ = outs[0], outs[1]

        num_contours = min(len(contours), self.max_candidates)

        boxes = []
        scores = []
        for index in range(num_contours):
            contour = contours[index]
            points, sside = self.get_mini_boxes(contour)
            if sside < self.min_size:
                continue
            points = np.array(points)
            if self.score_mode == "fast":
                score = self.box_score_fast(pred, points.reshape(-1, 2))
            else:
                score = self.box_score_slow(pred, contour)
            if self.box_thresh > score:
                continue

            box = self.unclip(points).reshape(-1, 1, 2)
            box, sside = self.get_mini_boxes(box)
            if sside < self.min_size + 2:
                continue
            box = np.array(box)

            box[:, 0] = np.clip(
                np.round(box[:, 0] / width * dest_width), 0, dest_width)
            box[:, 1] = np.clip(
                np.round(box[:, 1] / height * dest_height), 0, dest_height)
            boxes.append(box.astype(np.int16))
            scores.append(score)
        return np.array(boxes, dtype=np.int16), scores

    def unclip(self, box):
        unclip_ratio = self.unclip_ratio
        poly = Polygon(box)
        distance = poly.area * unclip_ratio / poly.length
        offset = pyclipper.PyclipperOffset()
        offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
        expanded = np.array(offset.Execute(distance))
        return expanded

    def get_mini_boxes(self, contour):
        bounding_box = cv2.minAreaRect(contour)
        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])

        index_1, index_2, index_3, index_4 = 0, 1, 2, 3
        if points[1][1] > points[0][1]:
            index_1 = 0
            index_4 = 1
        else:
            index_1 = 1
            index_4 = 0
        if points[3][1] > points[2][1]:
            index_2 = 2
            index_3 = 3
        else:
            index_2 = 3
            index_3 = 2

        box = [
            points[index_1], points[index_2], points[index_3], points[index_4]
        ]
        return box, min(bounding_box[1])

    def box_score_fast(self, bitmap, _box):
        '''
        box_score_fast: use bbox mean score as the mean score
        '''
        h, w = bitmap.shape[:2]
        box = _box.copy()
        xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1)
        xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1)
        ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1)
        ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1)

        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
        box[:, 0] = box[:, 0] - xmin
        box[:, 1] = box[:, 1] - ymin
        cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

    def box_score_slow(self, bitmap, contour):
        '''
        box_score_slow: use polygon mean score as the mean score
        '''
        h, w = bitmap.shape[:2]
        contour = contour.copy()
        contour = np.reshape(contour, (-1, 2))

        xmin = np.clip(np.min(contour[:, 0]), 0, w - 1)
        xmax = np.clip(np.max(contour[:, 0]), 0, w - 1)
        ymin = np.clip(np.min(contour[:, 1]), 0, h - 1)
        ymax = np.clip(np.max(contour[:, 1]), 0, h - 1)

        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)

        contour[:, 0] = contour[:, 0] - xmin
        contour[:, 1] = contour[:, 1] - ymin

        cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype(np.int32), 1)
        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

    def __call__(self, outs_dict, shape_list):
        pred = outs_dict['maps']
        # if isinstance(pred, paddle.Tensor):
        #     pred = pred.numpy()
        pred = pred[:, 0, :, :]
        segmentation = pred > self.thresh

        boxes_batch = []
        for batch_index in range(pred.shape[0]):
            src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]
            if self.dilation_kernel is not None:
                mask = cv2.dilate(
                    np.array(segmentation[batch_index]).astype(np.uint8),
                    self.dilation_kernel)
            else:
                mask = segmentation[batch_index]
            boxes, scores = self.boxes_from_bitmap(pred[batch_index], mask,
                                                   src_w, src_h)

            boxes_batch.append({'points': boxes})
        return boxes_batch


class DistillationDBPostProcess(object):
    def __init__(self,
                 model_name=["student"],
                 key=None,
                 thresh=0.3,
                 box_thresh=0.6,
                 max_candidates=1000,
                 unclip_ratio=1.5,
                 use_dilation=False,
                 score_mode="fast",
                 **kwargs):
        self.model_name = model_name
        self.key = key
        self.post_process = DBPostProcess(
            thresh=thresh,
            box_thresh=box_thresh,
            max_candidates=max_candidates,
            unclip_ratio=unclip_ratio,
            use_dilation=use_dilation,
            score_mode=score_mode)

    def __call__(self, predicts, shape_list):
        results = {}
        for k in self.model_name:
            results[k] = self.post_process(predicts[k], shape_list=shape_list)
        return results


class DetPostProcess(object):
    def __init__(self) -> None:
        pass

    def order_points_clockwise(self, pts):
        """
        reference from: https://github.com/jrosebr1/imutils/blob/master/imutils/perspective.py
        # sort the points based on their x-coordinates
        """
        xSorted = pts[np.argsort(pts[:, 0]), :]

        # grab the left-most and right-most points from the sorted
        # x-coordinate points
        leftMost = xSorted[:2, :]
        rightMost = xSorted[2:, :]

        # now, sort the left-most coordinates according to their
        # y-coordinates so we can grab the top-left and bottom-left
        # points, respectively
        leftMost = leftMost[np.argsort(leftMost[:, 1]), :]
        (tl, bl) = leftMost

        rightMost = rightMost[np.argsort(rightMost[:, 1]), :]
        (tr, br) = rightMost

        rect = np.array([tl, tr, br, bl], dtype="float32")
        return rect

    def clip_det_res(self, points, img_height, img_width):
        for pno in range(points.shape[0]):
            points[pno, 0] = int(min(max(points[pno, 0], 0), img_width - 1))
            points[pno, 1] = int(min(max(points[pno, 1], 0), img_height - 1))
        return points

    def filter_tag_det_res(self, dt_boxes, image_shape):
        img_height, img_width = image_shape[0:2]
        dt_boxes_new = []
        for box in dt_boxes:
            box = self.order_points_clockwise(box)
            box = self.clip_det_res(box, img_height, img_width)
            rect_width = int(np.linalg.norm(box[0] - box[1]))
            rect_height = int(np.linalg.norm(box[0] - box[3]))
            if rect_width <= 3 or rect_height <= 3:
                continue
            dt_boxes_new.append(box)
        dt_boxes = np.array(dt_boxes_new)
        return dt_boxes
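DBPostProcess converts the DB text detector's probability map into quadrilateral boxes. A hedged usage sketch on a synthetic map (real inputs come from the RKNN detector; the import path assumes the vendored module above):

import numpy as np

from det_utils.db_postprocess import DBPostProcess  # as vendored above

post = DBPostProcess(thresh=0.3, box_thresh=0.6, unclip_ratio=1.5)

maps = np.zeros((1, 1, 640, 640), dtype=np.float32)  # batch of one probability map
maps[0, 0, 100:140, 200:400] = 0.9                   # one bright text-like rectangle

shape_list = np.array([[640, 640, 1.0, 1.0]])        # rows: (src_h, src_w, ratio_h, ratio_w)
boxes = post({"maps": maps}, shape_list)[0]["points"]
print(boxes.shape)                                   # (num_boxes, 4, 2) corner points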
373  plugins/rknn/src/det_utils/operators.py  Normal file
@@ -0,0 +1,373 @@
"""
|
||||
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import sys
|
||||
import six
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
|
||||
class DecodeImage(object):
|
||||
""" decode image """
|
||||
|
||||
def __init__(self, img_mode='RGB', channel_first=False, **kwargs):
|
||||
self.img_mode = img_mode
|
||||
self.channel_first = channel_first
|
||||
|
||||
def __call__(self, data):
|
||||
img = data['image']
|
||||
if six.PY2:
|
||||
assert type(img) is str and len(
|
||||
img) > 0, "invalid input 'img' in DecodeImage"
|
||||
else:
|
||||
assert type(img) is bytes and len(
|
||||
img) > 0, "invalid input 'img' in DecodeImage"
|
||||
img = np.frombuffer(img, dtype='uint8')
|
||||
img = cv2.imdecode(img, 1)
|
||||
if img is None:
|
||||
return None
|
||||
if self.img_mode == 'GRAY':
|
||||
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
|
||||
elif self.img_mode == 'RGB':
|
||||
assert img.shape[2] == 3, 'invalid shape of image[%s]' % (img.shape)
|
||||
img = img[:, :, ::-1]
|
||||
|
||||
if self.channel_first:
|
||||
img = img.transpose((2, 0, 1))
|
||||
|
||||
data['image'] = img
|
||||
return data
|
||||
|
||||
|
||||
class NRTRDecodeImage(object):
|
||||
""" decode image """
|
||||
|
||||
def __init__(self, img_mode='RGB', channel_first=False, **kwargs):
|
||||
self.img_mode = img_mode
|
||||
self.channel_first = channel_first
|
||||
|
||||
def __call__(self, data):
|
||||
img = data['image']
|
||||
if six.PY2:
|
||||
assert type(img) is str and len(
|
||||
img) > 0, "invalid input 'img' in DecodeImage"
|
||||
else:
|
||||
assert type(img) is bytes and len(
|
||||
img) > 0, "invalid input 'img' in DecodeImage"
|
||||
img = np.frombuffer(img, dtype='uint8')
|
||||
|
||||
img = cv2.imdecode(img, 1)
|
||||
|
||||
if img is None:
|
||||
return None
|
||||
if self.img_mode == 'GRAY':
|
||||
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
|
||||
elif self.img_mode == 'RGB':
|
||||
assert img.shape[2] == 3, 'invalid shape of image[%s]' % (img.shape)
|
||||
img = img[:, :, ::-1]
|
||||
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
|
||||
if self.channel_first:
|
||||
img = img.transpose((2, 0, 1))
|
||||
data['image'] = img
|
||||
return data
|
||||
|
||||
class NormalizeImage(object):
|
||||
""" normalize image such as substract mean, divide std
|
||||
"""
|
||||
|
||||
def __init__(self, scale=None, mean=None, std=None, order='chw', **kwargs):
|
||||
if isinstance(scale, str):
|
||||
scale = eval(scale)
|
||||
self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
|
||||
mean = mean if mean is not None else [0.485, 0.456, 0.406]
|
||||
std = std if std is not None else [0.229, 0.224, 0.225]
|
||||
|
||||
shape = (3, 1, 1) if order == 'chw' else (1, 1, 3)
|
||||
self.mean = np.array(mean).reshape(shape).astype('float32')
|
||||
self.std = np.array(std).reshape(shape).astype('float32')
|
||||
|
||||
def __call__(self, data):
|
||||
img = data['image']
|
||||
from PIL import Image
|
||||
if isinstance(img, Image.Image):
|
||||
img = np.array(img)
|
||||
|
||||
assert isinstance(img,
|
||||
np.ndarray), "invalid input 'img' in NormalizeImage"
|
||||
data['image'] = (
|
||||
img.astype('float32') * self.scale - self.mean) / self.std
|
||||
return data
|
||||
|
||||
|
||||
class ToCHWImage(object):
|
||||
""" convert hwc image to chw image
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
pass
|
||||
|
||||
def __call__(self, data):
|
||||
img = data['image']
|
||||
from PIL import Image
|
||||
if isinstance(img, Image.Image):
|
||||
img = np.array(img)
|
||||
data['image'] = img.transpose((2, 0, 1))
|
||||
return data
|
||||
|
||||
|
||||
class KeepKeys(object):
|
||||
def __init__(self, keep_keys, **kwargs):
|
||||
self.keep_keys = keep_keys
|
||||
|
||||
def __call__(self, data):
|
||||
data_list = []
|
||||
for key in self.keep_keys:
|
||||
data_list.append(data[key])
|
||||
return data_list
|
||||
|
||||
|
||||
class DetResizeForTest(object):
|
||||
def __init__(self, **kwargs):
|
||||
super(DetResizeForTest, self).__init__()
|
||||
self.square_input = True
|
||||
self.resize_type = 0
|
||||
if 'image_shape' in kwargs:
|
||||
self.image_shape = kwargs['image_shape']
|
||||
self.resize_type = 1
|
||||
elif 'limit_side_len' in kwargs:
|
||||
self.limit_side_len = kwargs['limit_side_len']
|
||||
self.limit_type = kwargs.get('limit_type', 'min')
|
||||
elif 'resize_long' in kwargs:
|
||||
self.resize_type = 2
|
||||
self.resize_long = kwargs.get('resize_long', 960)
|
||||
else:
|
||||
self.limit_side_len = 736
|
||||
self.limit_type = 'min'
|
||||
|
||||
def __call__(self, data):
|
||||
img = data['image']
|
||||
src_h, src_w, _ = img.shape
|
||||
|
||||
if self.resize_type == 0:
|
||||
# img, shape = self.resize_image_type0(img)
|
||||
img, [ratio_h, ratio_w] = self.resize_image_type0(img)
|
||||
elif self.resize_type == 2:
|
||||
img, [ratio_h, ratio_w] = self.resize_image_type2(img)
|
||||
else:
|
||||
# img, shape = self.resize_image_type1(img)
|
||||
img, [ratio_h, ratio_w] = self.resize_image_type1(img)
|
||||
|
||||
|
||||
|
||||
data['image'] = img
|
||||
data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w])
|
||||
if len(data['shape'].shape) == 1:
|
||||
data['shape'] = np.expand_dims(data['shape'], axis=0)
|
||||
return data
|
||||
|
||||
def resize_image_type1(self, img):
|
||||
resize_h, resize_w = self.image_shape
|
||||
ori_h, ori_w = img.shape[:2] # (h, w, c)
|
||||
ratio_h = float(resize_h) / ori_h
|
||||
ratio_w = float(resize_w) / ori_w
|
||||
img = cv2.resize(img, (int(resize_w), int(resize_h)))
|
||||
# return img, np.array([ori_h, ori_w])
|
||||
return img, [ratio_h, ratio_w]
|
||||
|
||||
def resize_image_type0(self, img):
|
||||
"""
|
||||
resize image to a size multiple of 32 which is required by the network
|
||||
args:
|
||||
img(array): array with shape [h, w, c]
|
||||
return(tuple):
|
||||
img, (ratio_h, ratio_w)
|
||||
"""
|
||||
limit_side_len = self.limit_side_len
|
||||
h, w, c = img.shape
|
||||
|
||||
# limit the max side
|
||||
if self.limit_type == 'max':
|
||||
if max(h, w) > limit_side_len:
|
||||
if h > w:
|
||||
ratio = float(limit_side_len) / h
|
||||
else:
|
||||
ratio = float(limit_side_len) / w
|
||||
else:
|
||||
ratio = 1.
|
||||
elif self.limit_type == 'min':
|
||||
if min(h, w) < limit_side_len:
|
||||
if h < w:
|
||||
ratio = float(limit_side_len) / h
|
||||
else:
|
||||
ratio = float(limit_side_len) / w
|
||||
else:
|
||||
ratio = 1.
|
||||
elif self.limit_type == 'resize_long':
|
||||
ratio = float(limit_side_len) / max(h,w)
|
||||
else:
|
||||
raise Exception('not support limit type, image ')
|
||||
resize_h = int(h * ratio)
|
||||
resize_w = int(w * ratio)
|
||||
|
||||
resize_h = max(int(round(resize_h / 32) * 32), 32)
|
||||
resize_w = max(int(round(resize_w / 32) * 32), 32)
|
||||
|
||||
try:
|
||||
if int(resize_w) <= 0 or int(resize_h) <= 0:
|
||||
return None, (None, None)
|
||||
img = cv2.resize(img, (int(resize_w), int(resize_h)))
|
||||
except:
|
||||
print(img.shape, resize_w, resize_h)
|
||||
sys.exit(0)
|
||||
ratio_h = resize_h / float(h)
|
||||
ratio_w = resize_w / float(w)
|
||||
return img, [ratio_h, ratio_w]
|
||||
|
||||
def resize_image_type2(self, img):
|
||||
h, w, _ = img.shape
|
||||
|
||||
resize_w = w
|
||||
resize_h = h
|
||||
|
||||
if resize_h > resize_w:
|
||||
ratio = float(self.resize_long) / resize_h
|
||||
else:
|
||||
ratio = float(self.resize_long) / resize_w
|
||||
|
||||
resize_h = int(resize_h * ratio)
|
||||
resize_w = int(resize_w * ratio)
|
||||
|
||||
max_stride = 128
|
||||
resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
|
||||
resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
|
||||
img = cv2.resize(img, (int(resize_w), int(resize_h)))
|
||||
ratio_h = resize_h / float(h)
|
||||
ratio_w = resize_w / float(w)
|
||||
|
||||
return img, [ratio_h, ratio_w]
|
||||
|
||||
|
||||
class E2EResizeForTest(object):
|
||||
def __init__(self, **kwargs):
|
||||
super(E2EResizeForTest, self).__init__()
|
||||
self.max_side_len = kwargs['max_side_len']
|
||||
self.valid_set = kwargs['valid_set']
|
||||
|
||||
def __call__(self, data):
|
||||
img = data['image']
|
||||
src_h, src_w, _ = img.shape
|
||||
if self.valid_set == 'totaltext':
|
||||
im_resized, [ratio_h, ratio_w] = self.resize_image_for_totaltext(
|
||||
img, max_side_len=self.max_side_len)
|
||||
else:
|
||||
im_resized, (ratio_h, ratio_w) = self.resize_image(
|
||||
img, max_side_len=self.max_side_len)
|
||||
data['image'] = im_resized
|
||||
data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w])
|
||||
return data
|
||||
|
||||
def resize_image_for_totaltext(self, im, max_side_len=512):
|
||||
|
||||
h, w, _ = im.shape
|
||||
resize_w = w
|
||||
resize_h = h
|
||||
ratio = 1.25
|
||||
if h * ratio > max_side_len:
|
||||
ratio = float(max_side_len) / resize_h
|
||||
resize_h = int(resize_h * ratio)
|
||||
resize_w = int(resize_w * ratio)
|
||||
|
||||
max_stride = 128
|
||||
resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
|
||||
resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
|
||||
im = cv2.resize(im, (int(resize_w), int(resize_h)))
|
||||
ratio_h = resize_h / float(h)
|
||||
ratio_w = resize_w / float(w)
|
||||
return im, (ratio_h, ratio_w)
|
||||
|
||||
def resize_image(self, im, max_side_len=512):
|
||||
"""
|
||||
resize image to a size multiple of max_stride which is required by the network
|
||||
:param im: the resized image
|
||||
:param max_side_len: limit of max image size to avoid out of memory in gpu
|
||||
:return: the resized image and the resize ratio
|
||||
"""
|
||||
h, w, _ = im.shape
|
||||
|
||||
resize_w = w
|
||||
resize_h = h
|
||||
|
||||
# Fix the longer side
|
||||
if resize_h > resize_w:
|
||||
ratio = float(max_side_len) / resize_h
|
||||
else:
|
||||
ratio = float(max_side_len) / resize_w
|
||||
|
||||
resize_h = int(resize_h * ratio)
|
||||
resize_w = int(resize_w * ratio)
|
||||
|
||||
max_stride = 128
|
||||
resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
|
||||
resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
|
||||
im = cv2.resize(im, (int(resize_w), int(resize_h)))
|
||||
ratio_h = resize_h / float(h)
|
||||
ratio_w = resize_w / float(w)
|
||||
|
||||
return im, (ratio_h, ratio_w)
|
||||
|
||||
|
||||
|
||||
class Pad_to_max_len(object):
|
||||
def __init__(self, **kwargs):
|
||||
super(Pad_to_max_len, self).__init__()
|
||||
self.max_h = kwargs['max_h']
|
||||
self.max_w = kwargs['max_w']
|
||||
|
||||
def __call__(self, data):
|
||||
img = data['image']
|
||||
if img.shape[-1] == 3:
|
||||
# hwc
|
||||
if img.shape[0]!= self.max_h:
|
||||
# TODO support
|
||||
# assert False, "not support"
|
||||
pad_h = self.max_h - img.shape[0]
|
||||
pad_w = self.max_w - img.shape[1]
|
||||
img = np.pad(img, ((0, pad_h), (0, pad_w), (0, 0)), 'constant', constant_values=0)
|
||||
if img.shape[1] < self.max_w:
|
||||
pad_w = self.max_w - img.shape[1]
|
||||
img = np.pad(img, ((0, 0), (0, pad_w), (0, 0)), 'constant', constant_values=0)
|
||||
|
||||
elif img.shape[0] == 3:
|
||||
# chw
|
||||
img = img.transpose((1, 2, 0))
|
||||
if img.shape[1]!= self.max_h:
|
||||
# TODO support
|
||||
assert False, "not support"
|
||||
if img.shape[0] < self.max_w:
|
||||
pad_w = self.max_w - img.shape[0]
|
||||
img = np.pad(img, ((0, 0), (0, 0), (0, pad_w)), 'constant', constant_values=0)
|
||||
|
||||
else:
|
||||
assert False, "not support"
|
||||
|
||||
data['image'] = img
|
||||
|
||||
return data
|
||||
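
A minimal usage sketch of these operators chained into a pipeline, assuming the file is importable as det_utils.operators; the config shape mirrors RKNN_DET_PREPROCESS_CONFIG in text_recognition.py later in this diff.

    import numpy as np
    import det_utils.operators as ops

    pipeline = [
        ops.DetResizeForTest(image_shape=(480, 480)),
        ops.NormalizeImage(std=[1., 1., 1.], mean=[0., 0., 0.], scale='1.', order='hwc'),
    ]
    data = {'image': np.zeros((720, 1280, 3), dtype=np.uint8)}
    for op in pipeline:
        data = op(data)
    # data['image'] is the 480x480 float32 model input; data['shape'] holds
    # [src_h, src_w, ratio_h, ratio_w] for mapping boxes back to the source frame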
plugins/rknn/src/rec_utils/__init__.py (new file, 0 lines)

plugins/rknn/src/rec_utils/operators.py (new file, 376 lines)
@@ -0,0 +1,376 @@
"""
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import sys
import six
import cv2
import numpy as np


class DecodeImage(object):
    """ decode image """

    def __init__(self, img_mode='RGB', channel_first=False, **kwargs):
        self.img_mode = img_mode
        self.channel_first = channel_first

    def __call__(self, data):
        img = data['image']
        if six.PY2:
            assert type(img) is str and len(
                img) > 0, "invalid input 'img' in DecodeImage"
        else:
            assert type(img) is bytes and len(
                img) > 0, "invalid input 'img' in DecodeImage"
        img = np.frombuffer(img, dtype='uint8')
        img = cv2.imdecode(img, 1)
        if img is None:
            return None
        if self.img_mode == 'GRAY':
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif self.img_mode == 'RGB':
            assert img.shape[2] == 3, 'invalid shape of image[%s]' % (img.shape)
            img = img[:, :, ::-1]

        if self.channel_first:
            img = img.transpose((2, 0, 1))

        data['image'] = img
        return data


class NRTRDecodeImage(object):
    """ decode image """

    def __init__(self, img_mode='RGB', channel_first=False, **kwargs):
        self.img_mode = img_mode
        self.channel_first = channel_first

    def __call__(self, data):
        img = data['image']
        if six.PY2:
            assert type(img) is str and len(
                img) > 0, "invalid input 'img' in DecodeImage"
        else:
            assert type(img) is bytes and len(
                img) > 0, "invalid input 'img' in DecodeImage"
        img = np.frombuffer(img, dtype='uint8')

        img = cv2.imdecode(img, 1)

        if img is None:
            return None
        if self.img_mode == 'GRAY':
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif self.img_mode == 'RGB':
            assert img.shape[2] == 3, 'invalid shape of image[%s]' % (img.shape)
            img = img[:, :, ::-1]
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if self.channel_first:
            img = img.transpose((2, 0, 1))
        data['image'] = img
        return data


class NormalizeImage(object):
    """ normalize image such as substract mean, divide std
    """

    def __init__(self, scale=None, mean=None, std=None, order='chw', **kwargs):
        if isinstance(scale, str):
            scale = eval(scale)
        self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
        mean = mean if mean is not None else [0.485, 0.456, 0.406]
        std = std if std is not None else [0.229, 0.224, 0.225]

        shape = (3, 1, 1) if order == 'chw' else (1, 1, 3)
        self.mean = np.array(mean).reshape(shape).astype('float32')
        self.std = np.array(std).reshape(shape).astype('float32')

    def __call__(self, data):
        img = data['image']
        from PIL import Image
        if isinstance(img, Image.Image):
            img = np.array(img)

        assert isinstance(img, np.ndarray), "invalid input 'img' in NormalizeImage"

        i = img.astype('float32')
        i = i * self.scale
        i = i - self.mean
        i = i / self.std
        data['image'] = i
        return data


class ToCHWImage(object):
    """ convert hwc image to chw image
    """

    def __init__(self, **kwargs):
        pass

    def __call__(self, data):
        img = data['image']
        from PIL import Image
        if isinstance(img, Image.Image):
            img = np.array(img)
        data['image'] = img.transpose((2, 0, 1))
        return data


class KeepKeys(object):
    def __init__(self, keep_keys, **kwargs):
        self.keep_keys = keep_keys

    def __call__(self, data):
        data_list = []
        for key in self.keep_keys:
            data_list.append(data[key])
        return data_list


class DetResizeForTest(object):
    def __init__(self, **kwargs):
        super(DetResizeForTest, self).__init__()
        self.square_input = True
        self.resize_type = 0
        if 'image_shape' in kwargs:
            self.image_shape = kwargs['image_shape']
            self.resize_type = 1
        elif 'limit_side_len' in kwargs:
            self.limit_side_len = kwargs['limit_side_len']
            self.limit_type = kwargs.get('limit_type', 'min')
        elif 'resize_long' in kwargs:
            self.resize_type = 2
            self.resize_long = kwargs.get('resize_long', 960)
        else:
            self.limit_side_len = 736
            self.limit_type = 'min'

    def __call__(self, data):
        img = data['image']
        src_h, src_w, _ = img.shape

        if self.resize_type == 0:
            # img, shape = self.resize_image_type0(img)
            img, [ratio_h, ratio_w] = self.resize_image_type0(img)
        elif self.resize_type == 2:
            img, [ratio_h, ratio_w] = self.resize_image_type2(img)
        else:
            # img, shape = self.resize_image_type1(img)
            img, [ratio_h, ratio_w] = self.resize_image_type1(img)

        data['image'] = img
        data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w])
        if len(data['shape'].shape) == 1:
            data['shape'] = np.expand_dims(data['shape'], axis=0)
        return data

    def resize_image_type1(self, img):
        resize_h, resize_w = self.image_shape
        ori_h, ori_w = img.shape[:2]  # (h, w, c)
        ratio_h = float(resize_h) / ori_h
        ratio_w = float(resize_w) / ori_w
        img = cv2.resize(img, (int(resize_w), int(resize_h)))
        # return img, np.array([ori_h, ori_w])
        return img, [ratio_h, ratio_w]

    def resize_image_type0(self, img):
        """
        resize image to a size multiple of 32 which is required by the network
        args:
            img(array): array with shape [h, w, c]
        return(tuple):
            img, (ratio_h, ratio_w)
        """
        limit_side_len = self.limit_side_len
        h, w, c = img.shape

        # limit the max side
        if self.limit_type == 'max':
            if max(h, w) > limit_side_len:
                if h > w:
                    ratio = float(limit_side_len) / h
                else:
                    ratio = float(limit_side_len) / w
            else:
                ratio = 1.
        elif self.limit_type == 'min':
            if min(h, w) < limit_side_len:
                if h < w:
                    ratio = float(limit_side_len) / h
                else:
                    ratio = float(limit_side_len) / w
            else:
                ratio = 1.
        elif self.limit_type == 'resize_long':
            ratio = float(limit_side_len) / max(h, w)
        else:
            raise Exception('not support limit type, image ')
        resize_h = int(h * ratio)
        resize_w = int(w * ratio)

        resize_h = max(int(round(resize_h / 32) * 32), 32)
        resize_w = max(int(round(resize_w / 32) * 32), 32)

        try:
            if int(resize_w) <= 0 or int(resize_h) <= 0:
                return None, (None, None)
            img = cv2.resize(img, (int(resize_w), int(resize_h)))
        except:
            print(img.shape, resize_w, resize_h)
            sys.exit(0)
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)
        return img, [ratio_h, ratio_w]

    def resize_image_type2(self, img):
        h, w, _ = img.shape

        resize_w = w
        resize_h = h

        if resize_h > resize_w:
            ratio = float(self.resize_long) / resize_h
        else:
            ratio = float(self.resize_long) / resize_w

        resize_h = int(resize_h * ratio)
        resize_w = int(resize_w * ratio)

        max_stride = 128
        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
        img = cv2.resize(img, (int(resize_w), int(resize_h)))
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)

        return img, [ratio_h, ratio_w]


class E2EResizeForTest(object):
    def __init__(self, **kwargs):
        super(E2EResizeForTest, self).__init__()
        self.max_side_len = kwargs['max_side_len']
        self.valid_set = kwargs['valid_set']

    def __call__(self, data):
        img = data['image']
        src_h, src_w, _ = img.shape
        if self.valid_set == 'totaltext':
            im_resized, [ratio_h, ratio_w] = self.resize_image_for_totaltext(
                img, max_side_len=self.max_side_len)
        else:
            im_resized, (ratio_h, ratio_w) = self.resize_image(
                img, max_side_len=self.max_side_len)
        data['image'] = im_resized
        data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w])
        return data

    def resize_image_for_totaltext(self, im, max_side_len=512):
        h, w, _ = im.shape
        resize_w = w
        resize_h = h
        ratio = 1.25
        if h * ratio > max_side_len:
            ratio = float(max_side_len) / resize_h
        resize_h = int(resize_h * ratio)
        resize_w = int(resize_w * ratio)

        max_stride = 128
        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
        im = cv2.resize(im, (int(resize_w), int(resize_h)))
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)
        return im, (ratio_h, ratio_w)

    def resize_image(self, im, max_side_len=512):
        """
        resize image to a size multiple of max_stride which is required by the network
        :param im: the resized image
        :param max_side_len: limit of max image size to avoid out of memory in gpu
        :return: the resized image and the resize ratio
        """
        h, w, _ = im.shape

        resize_w = w
        resize_h = h

        # Fix the longer side
        if resize_h > resize_w:
            ratio = float(max_side_len) / resize_h
        else:
            ratio = float(max_side_len) / resize_w

        resize_h = int(resize_h * ratio)
        resize_w = int(resize_w * ratio)

        max_stride = 128
        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
        im = cv2.resize(im, (int(resize_w), int(resize_h)))
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)

        return im, (ratio_h, ratio_w)


class Pad_to_max_len(object):
    def __init__(self, **kwargs):
        super(Pad_to_max_len, self).__init__()
        self.max_h = kwargs['max_h']
        self.max_w = kwargs['max_w']

    def __call__(self, data):
        img = data['image']
        if img.shape[-1] == 3:
            # hwc
            if img.shape[0] != self.max_h:
                # TODO support
                # assert False, "not support"
                pad_h = self.max_h - img.shape[0]
                pad_w = self.max_w - img.shape[1]
                img = np.pad(img, ((0, pad_h), (0, pad_w), (0, 0)), 'constant', constant_values=0)
            if img.shape[1] < self.max_w:
                pad_w = self.max_w - img.shape[1]
                img = np.pad(img, ((0, 0), (0, pad_w), (0, 0)), 'constant', constant_values=0)

        elif img.shape[0] == 3:
            # chw
            img = img.transpose((1, 2, 0))
            if img.shape[1] != self.max_h:
                # TODO support
                assert False, "not support"
            if img.shape[0] < self.max_w:
                pad_w = self.max_w - img.shape[0]
                img = np.pad(img, ((0, 0), (0, 0), (0, pad_w)), 'constant', constant_values=0)

        else:
            assert False, "not support"

        data['image'] = img

        return data
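
A short worked example of the multiple-of-32 rounding in resize_image_type0 above, assuming the default limit_type='min' with limit_side_len=736 and a 540x960 input:

    h, w = 540, 960               # min side 540 < 736, and h < w
    ratio = 736.0 / h             # ~1.3630
    resize_h, resize_w = int(h * ratio), int(w * ratio)   # 736, 1308
    resize_h = max(int(round(resize_h / 32) * 32), 32)    # 736  (23 * 32)
    resize_w = max(int(round(resize_w / 32) * 32), 32)    # 1312 (41 * 32)
    # the returned ratios 736/540 and 1312/960 rescale detected boxes to the source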
plugins/rknn/src/rec_utils/rec_postprocess.py (new file, 814 lines)
@@ -0,0 +1,814 @@
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
# import paddle
# from paddle.nn import functional as F
import re


class BaseRecLabelDecode(object):
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=False):
        self.beg_str = "sos"
        self.end_str = "eos"

        self.character_str = []
        if character_dict_path is None:
            self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
            dict_character = list(self.character_str)
        else:
            with open(character_dict_path, "rb") as fin:
                lines = fin.readlines()
                for line in lines:
                    line = line.decode('utf-8').strip("\n").strip("\r\n")
                    self.character_str.append(line)
            if use_space_char:
                self.character_str.append(" ")
            dict_character = list(self.character_str)

        dict_character = self.add_special_char(dict_character)
        self.dict = {}
        for i, char in enumerate(dict_character):
            self.dict[char] = i
        self.character = dict_character

        if 'arabic' in character_dict_path:
            self.reverse = True
        else:
            self.reverse = False

    def pred_reverse(self, pred):
        pred_re = []
        c_current = ''
        for c in pred:
            if not bool(re.search('[a-zA-Z0-9 :*./%+-]', c)):
                if c_current != '':
                    pred_re.append(c_current)
                pred_re.append(c)
                c_current = ''
            else:
                c_current += c
        if c_current != '':
            pred_re.append(c_current)

        return ''.join(pred_re[::-1])

    def add_special_char(self, dict_character):
        return dict_character

    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
        """ convert text-index into text-label. """
        result_list = []
        ignored_tokens = self.get_ignored_tokens()
        batch_size = len(text_index)
        for batch_idx in range(batch_size):
            selection = np.ones(len(text_index[batch_idx]), dtype=bool)
            if is_remove_duplicate:
                selection[1:] = text_index[batch_idx][1:] != text_index[
                    batch_idx][:-1]
            for ignored_token in ignored_tokens:
                selection &= text_index[batch_idx] != ignored_token

            char_list = [
                self.character[text_id]
                for text_id in text_index[batch_idx][selection]
            ]
            if text_prob is not None:
                conf_list = text_prob[batch_idx][selection]
            else:
                conf_list = [1] * len(selection)
            if len(conf_list) == 0:
                conf_list = [0]

            text = ''.join(char_list)

            if self.reverse:  # for arabic rec
                text = self.pred_reverse(text)

            result_list.append((text, np.mean(conf_list).tolist()))
        return result_list

    def get_ignored_tokens(self):
        return [0]  # for ctc blank


class CTCLabelDecode(BaseRecLabelDecode):
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=False,
                 **kwargs):
        super(CTCLabelDecode, self).__init__(character_dict_path,
                                             use_space_char)

    def __call__(self, preds, label=None, *args, **kwargs):
        if isinstance(preds, tuple) or isinstance(preds, list):
            preds = preds[-1]
        # if isinstance(preds, paddle.Tensor):
        #     preds = preds.numpy()
        preds_idx = preds.argmax(axis=2)
        preds_prob = preds.max(axis=2)
        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
        if label is None:
            return text
        label = self.decode(label)
        return text, label

    def add_special_char(self, dict_character):
        dict_character = ['blank'] + dict_character
        return dict_character


class DistillationCTCLabelDecode(CTCLabelDecode):
    """
    Convert
    Convert between text-label and text-index
    """

    def __init__(self,
                 character_dict_path=None,
                 use_space_char=False,
                 model_name=["student"],
                 key=None,
                 multi_head=False,
                 **kwargs):
        super(DistillationCTCLabelDecode, self).__init__(character_dict_path,
                                                         use_space_char)
        if not isinstance(model_name, list):
            model_name = [model_name]
        self.model_name = model_name

        self.key = key
        self.multi_head = multi_head

    def __call__(self, preds, label=None, *args, **kwargs):
        output = dict()
        for name in self.model_name:
            pred = preds[name]
            if self.key is not None:
                pred = pred[self.key]
            if self.multi_head and isinstance(pred, dict):
                pred = pred['ctc']
            output[name] = super().__call__(pred, label=label, *args, **kwargs)
        return output


class AttnLabelDecode(BaseRecLabelDecode):
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=False,
                 **kwargs):
        super(AttnLabelDecode, self).__init__(character_dict_path,
                                              use_space_char)

    def add_special_char(self, dict_character):
        self.beg_str = "sos"
        self.end_str = "eos"
        dict_character = dict_character
        dict_character = [self.beg_str] + dict_character + [self.end_str]
        return dict_character

    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
        """ convert text-index into text-label. """
        result_list = []
        ignored_tokens = self.get_ignored_tokens()
        [beg_idx, end_idx] = self.get_ignored_tokens()
        batch_size = len(text_index)
        for batch_idx in range(batch_size):
            char_list = []
            conf_list = []
            for idx in range(len(text_index[batch_idx])):
                if text_index[batch_idx][idx] in ignored_tokens:
                    continue
                if int(text_index[batch_idx][idx]) == int(end_idx):
                    break
                if is_remove_duplicate:
                    # only for predict
                    if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
                            batch_idx][idx]:
                        continue
                char_list.append(self.character[int(text_index[batch_idx][
                    idx])])
                if text_prob is not None:
                    conf_list.append(text_prob[batch_idx][idx])
                else:
                    conf_list.append(1)
            text = ''.join(char_list)
            result_list.append((text, np.mean(conf_list).tolist()))
        return result_list

    def __call__(self, preds, label=None, *args, **kwargs):
        """
        text = self.decode(text)
        if label is None:
            return text
        else:
            label = self.decode(label, is_remove_duplicate=False)
            return text, label
        """
        # if isinstance(preds, paddle.Tensor):
        #     preds = preds.numpy()

        preds_idx = preds.argmax(axis=2)
        preds_prob = preds.max(axis=2)
        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
        if label is None:
            return text
        label = self.decode(label, is_remove_duplicate=False)
        return text, label

    def get_ignored_tokens(self):
        beg_idx = self.get_beg_end_flag_idx("beg")
        end_idx = self.get_beg_end_flag_idx("end")
        return [beg_idx, end_idx]

    def get_beg_end_flag_idx(self, beg_or_end):
        if beg_or_end == "beg":
            idx = np.array(self.dict[self.beg_str])
        elif beg_or_end == "end":
            idx = np.array(self.dict[self.end_str])
        else:
            assert False, "unsupport type %s in get_beg_end_flag_idx" \
                % beg_or_end
        return idx


class SEEDLabelDecode(BaseRecLabelDecode):
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=False,
                 **kwargs):
        super(SEEDLabelDecode, self).__init__(character_dict_path,
                                              use_space_char)

    def add_special_char(self, dict_character):
        self.padding_str = "padding"
        self.end_str = "eos"
        self.unknown = "unknown"
        dict_character = dict_character + [
            self.end_str, self.padding_str, self.unknown
        ]
        return dict_character

    def get_ignored_tokens(self):
        end_idx = self.get_beg_end_flag_idx("eos")
        return [end_idx]

    def get_beg_end_flag_idx(self, beg_or_end):
        if beg_or_end == "sos":
            idx = np.array(self.dict[self.beg_str])
        elif beg_or_end == "eos":
            idx = np.array(self.dict[self.end_str])
        else:
            assert False, "unsupport type %s in get_beg_end_flag_idx" % beg_or_end
        return idx

    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
        """ convert text-index into text-label. """
        result_list = []
        [end_idx] = self.get_ignored_tokens()
        batch_size = len(text_index)
        for batch_idx in range(batch_size):
            char_list = []
            conf_list = []
            for idx in range(len(text_index[batch_idx])):
                if int(text_index[batch_idx][idx]) == int(end_idx):
                    break
                if is_remove_duplicate:
                    # only for predict
                    if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
                            batch_idx][idx]:
                        continue
                char_list.append(self.character[int(text_index[batch_idx][
                    idx])])
                if text_prob is not None:
                    conf_list.append(text_prob[batch_idx][idx])
                else:
                    conf_list.append(1)
            text = ''.join(char_list)
            result_list.append((text, np.mean(conf_list).tolist()))
        return result_list

    def __call__(self, preds, label=None, *args, **kwargs):
        """
        text = self.decode(text)
        if label is None:
            return text
        else:
            label = self.decode(label, is_remove_duplicate=False)
            return text, label
        """
        preds_idx = preds["rec_pred"]
        # if isinstance(preds_idx, paddle.Tensor):
        #     preds_idx = preds_idx.numpy()
        if "rec_pred_scores" in preds:
            preds_idx = preds["rec_pred"]
            preds_prob = preds["rec_pred_scores"]
        else:
            preds_idx = preds["rec_pred"].argmax(axis=2)
            preds_prob = preds["rec_pred"].max(axis=2)
        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
        if label is None:
            return text
        label = self.decode(label, is_remove_duplicate=False)
        return text, label


class SRNLabelDecode(BaseRecLabelDecode):
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=False,
                 **kwargs):
        super(SRNLabelDecode, self).__init__(character_dict_path,
                                             use_space_char)
        self.max_text_length = kwargs.get('max_text_length', 25)

    def __call__(self, preds, label=None, *args, **kwargs):
        pred = preds['predict']
        char_num = len(self.character_str) + 2
        # if isinstance(pred, paddle.Tensor):
        #     pred = pred.numpy()
        pred = np.reshape(pred, [-1, char_num])

        preds_idx = np.argmax(pred, axis=1)
        preds_prob = np.max(pred, axis=1)

        preds_idx = np.reshape(preds_idx, [-1, self.max_text_length])

        preds_prob = np.reshape(preds_prob, [-1, self.max_text_length])

        text = self.decode(preds_idx, preds_prob)

        if label is None:
            text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
            return text
        label = self.decode(label)
        return text, label

    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
        """ convert text-index into text-label. """
        result_list = []
        ignored_tokens = self.get_ignored_tokens()
        batch_size = len(text_index)

        for batch_idx in range(batch_size):
            char_list = []
            conf_list = []
            for idx in range(len(text_index[batch_idx])):
                if text_index[batch_idx][idx] in ignored_tokens:
                    continue
                if is_remove_duplicate:
                    # only for predict
                    if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
                            batch_idx][idx]:
                        continue
                char_list.append(self.character[int(text_index[batch_idx][
                    idx])])
                if text_prob is not None:
                    conf_list.append(text_prob[batch_idx][idx])
                else:
                    conf_list.append(1)

            text = ''.join(char_list)
            result_list.append((text, np.mean(conf_list).tolist()))
        return result_list

    def add_special_char(self, dict_character):
        dict_character = dict_character + [self.beg_str, self.end_str]
        return dict_character

    def get_ignored_tokens(self):
        beg_idx = self.get_beg_end_flag_idx("beg")
        end_idx = self.get_beg_end_flag_idx("end")
        return [beg_idx, end_idx]

    def get_beg_end_flag_idx(self, beg_or_end):
        if beg_or_end == "beg":
            idx = np.array(self.dict[self.beg_str])
        elif beg_or_end == "end":
            idx = np.array(self.dict[self.end_str])
        else:
            assert False, "unsupport type %s in get_beg_end_flag_idx" \
                % beg_or_end
        return idx


class SARLabelDecode(BaseRecLabelDecode):
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=False,
                 **kwargs):
        super(SARLabelDecode, self).__init__(character_dict_path,
                                             use_space_char)

        self.rm_symbol = kwargs.get('rm_symbol', False)

    def add_special_char(self, dict_character):
        beg_end_str = "<BOS/EOS>"
        unknown_str = "<UKN>"
        padding_str = "<PAD>"
        dict_character = dict_character + [unknown_str]
        self.unknown_idx = len(dict_character) - 1
        dict_character = dict_character + [beg_end_str]
        self.start_idx = len(dict_character) - 1
        self.end_idx = len(dict_character) - 1
        dict_character = dict_character + [padding_str]
        self.padding_idx = len(dict_character) - 1
        return dict_character

    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
        """ convert text-index into text-label. """
        result_list = []
        ignored_tokens = self.get_ignored_tokens()

        batch_size = len(text_index)
        for batch_idx in range(batch_size):
            char_list = []
            conf_list = []
            for idx in range(len(text_index[batch_idx])):
                if text_index[batch_idx][idx] in ignored_tokens:
                    continue
                if int(text_index[batch_idx][idx]) == int(self.end_idx):
                    if text_prob is None and idx == 0:
                        continue
                    else:
                        break
                if is_remove_duplicate:
                    # only for predict
                    if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
                            batch_idx][idx]:
                        continue
                char_list.append(self.character[int(text_index[batch_idx][
                    idx])])
                if text_prob is not None:
                    conf_list.append(text_prob[batch_idx][idx])
                else:
                    conf_list.append(1)
            text = ''.join(char_list)
            if self.rm_symbol:
                comp = re.compile('[^A-Z^a-z^0-9^\u4e00-\u9fa5]')
                text = text.lower()
                text = comp.sub('', text)
            result_list.append((text, np.mean(conf_list).tolist()))
        return result_list

    def __call__(self, preds, label=None, *args, **kwargs):
        # if isinstance(preds, paddle.Tensor):
        #     preds = preds.numpy()
        preds_idx = preds.argmax(axis=2)
        preds_prob = preds.max(axis=2)

        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)

        if label is None:
            return text
        label = self.decode(label, is_remove_duplicate=False)
        return text, label

    def get_ignored_tokens(self):
        return [self.padding_idx]


class DistillationSARLabelDecode(SARLabelDecode):
    """
    Convert
    Convert between text-label and text-index
    """

    def __init__(self,
                 character_dict_path=None,
                 use_space_char=False,
                 model_name=["student"],
                 key=None,
                 multi_head=False,
                 **kwargs):
        super(DistillationSARLabelDecode, self).__init__(character_dict_path,
                                                         use_space_char)
        if not isinstance(model_name, list):
            model_name = [model_name]
        self.model_name = model_name

        self.key = key
        self.multi_head = multi_head

    def __call__(self, preds, label=None, *args, **kwargs):
        output = dict()
        for name in self.model_name:
            pred = preds[name]
            if self.key is not None:
                pred = pred[self.key]
            if self.multi_head and isinstance(pred, dict):
                pred = pred['sar']
            output[name] = super().__call__(pred, label=label, *args, **kwargs)
        return output


class PRENLabelDecode(BaseRecLabelDecode):
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=False,
                 **kwargs):
        super(PRENLabelDecode, self).__init__(character_dict_path,
                                              use_space_char)

    def add_special_char(self, dict_character):
        padding_str = '<PAD>'  # 0
        end_str = '<EOS>'  # 1
        unknown_str = '<UNK>'  # 2

        dict_character = [padding_str, end_str, unknown_str] + dict_character
        self.padding_idx = 0
        self.end_idx = 1
        self.unknown_idx = 2

        return dict_character

    def decode(self, text_index, text_prob=None):
        """ convert text-index into text-label. """
        result_list = []
        batch_size = len(text_index)

        for batch_idx in range(batch_size):
            char_list = []
            conf_list = []
            for idx in range(len(text_index[batch_idx])):
                if text_index[batch_idx][idx] == self.end_idx:
                    break
                if text_index[batch_idx][idx] in \
                        [self.padding_idx, self.unknown_idx]:
                    continue
                char_list.append(self.character[int(text_index[batch_idx][
                    idx])])
                if text_prob is not None:
                    conf_list.append(text_prob[batch_idx][idx])
                else:
                    conf_list.append(1)

            text = ''.join(char_list)
            if len(text) > 0:
                result_list.append((text, np.mean(conf_list).tolist()))
            else:
                # here confidence of empty recog result is 1
                result_list.append(('', 1))
        return result_list

    def __call__(self, preds, label=None, *args, **kwargs):
        preds = preds.numpy()
        preds_idx = preds.argmax(axis=2)
        preds_prob = preds.max(axis=2)
        text = self.decode(preds_idx, preds_prob)
        if label is None:
            return text
        label = self.decode(label)
        return text, label


class NRTRLabelDecode(BaseRecLabelDecode):
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=True, **kwargs):
        super(NRTRLabelDecode, self).__init__(character_dict_path,
                                              use_space_char)

    def __call__(self, preds, label=None, *args, **kwargs):

        if len(preds) == 2:
            preds_id = preds[0]
            preds_prob = preds[1]
            # if isinstance(preds_id, paddle.Tensor):
            #     preds_id = preds_id.numpy()
            # if isinstance(preds_prob, paddle.Tensor):
            #     preds_prob = preds_prob.numpy()
            if preds_id[0][0] == 2:
                preds_idx = preds_id[:, 1:]
                preds_prob = preds_prob[:, 1:]
            else:
                preds_idx = preds_id
            text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
            if label is None:
                return text
            label = self.decode(label[:, 1:])
        else:
            # if isinstance(preds, paddle.Tensor):
            #     preds = preds.numpy()
            preds_idx = preds.argmax(axis=2)
            preds_prob = preds.max(axis=2)
            text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
            if label is None:
                return text
            label = self.decode(label[:, 1:])
        return text, label

    def add_special_char(self, dict_character):
        dict_character = ['blank', '<unk>', '<s>', '</s>'] + dict_character
        return dict_character

    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
        """ convert text-index into text-label. """
        result_list = []
        batch_size = len(text_index)
        for batch_idx in range(batch_size):
            char_list = []
            conf_list = []
            for idx in range(len(text_index[batch_idx])):
                try:
                    char_idx = self.character[int(text_index[batch_idx][idx])]
                except:
                    continue
                if char_idx == '</s>':  # end
                    break
                char_list.append(char_idx)
                if text_prob is not None:
                    conf_list.append(text_prob[batch_idx][idx])
                else:
                    conf_list.append(1)
            text = ''.join(char_list)
            result_list.append((text.lower(), np.mean(conf_list).tolist()))
        return result_list


class ViTSTRLabelDecode(NRTRLabelDecode):
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=False,
                 **kwargs):
        super(ViTSTRLabelDecode, self).__init__(character_dict_path,
                                                use_space_char)

    def __call__(self, preds, label=None, *args, **kwargs):
        # if isinstance(preds, paddle.Tensor):
        #     preds = preds[:, 1:].numpy()
        # else:
        #     preds = preds[:, 1:]
        preds = preds[:, 1:].numpy()
        preds_idx = preds.argmax(axis=2)
        preds_prob = preds.max(axis=2)
        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
        if label is None:
            return text
        label = self.decode(label[:, 1:])
        return text, label

    def add_special_char(self, dict_character):
        dict_character = ['<s>', '</s>'] + dict_character
        return dict_character


class ABINetLabelDecode(NRTRLabelDecode):
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=False,
                 **kwargs):
        super(ABINetLabelDecode, self).__init__(character_dict_path,
                                                use_space_char)

    def __call__(self, preds, label=None, *args, **kwargs):
        if isinstance(preds, dict):
            preds = preds['align'][-1].numpy()
        # elif isinstance(preds, paddle.Tensor):
        #     preds = preds.numpy()
        # else:
        #     preds = preds
        preds = preds.numpy()
        preds_idx = preds.argmax(axis=2)
        preds_prob = preds.max(axis=2)
        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
        if label is None:
            return text
        label = self.decode(label)
        return text, label

    def add_special_char(self, dict_character):
        dict_character = ['</s>'] + dict_character
        return dict_character


class SPINLabelDecode(AttnLabelDecode):
    """ Convert between text-label and text-index """

    def __init__(self, character_dict_path=None, use_space_char=False,
                 **kwargs):
        super(SPINLabelDecode, self).__init__(character_dict_path,
                                              use_space_char)

    def add_special_char(self, dict_character):
        self.beg_str = "sos"
        self.end_str = "eos"
        dict_character = dict_character
        dict_character = [self.beg_str] + [self.end_str] + dict_character
        return dict_character


# class VLLabelDecode(BaseRecLabelDecode):
#     """ Convert between text-label and text-index """

#     def __init__(self, character_dict_path=None, use_space_char=False,
#                  **kwargs):
#         super(VLLabelDecode, self).__init__(character_dict_path, use_space_char)
#         self.max_text_length = kwargs.get('max_text_length', 25)
#         self.nclass = len(self.character) + 1
#         self.character = self.character[10:] + self.character[
#             1:10] + [self.character[0]]

#     def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
#         """ convert text-index into text-label. """
#         result_list = []
#         ignored_tokens = self.get_ignored_tokens()
#         batch_size = len(text_index)
#         for batch_idx in range(batch_size):
#             selection = np.ones(len(text_index[batch_idx]), dtype=bool)
#             if is_remove_duplicate:
#                 selection[1:] = text_index[batch_idx][1:] != text_index[
#                     batch_idx][:-1]
#             for ignored_token in ignored_tokens:
#                 selection &= text_index[batch_idx] != ignored_token

#             char_list = [
#                 self.character[text_id - 1]
#                 for text_id in text_index[batch_idx][selection]
#             ]
#             if text_prob is not None:
#                 conf_list = text_prob[batch_idx][selection]
#             else:
#                 conf_list = [1] * len(selection)
#             if len(conf_list) == 0:
#                 conf_list = [0]

#             text = ''.join(char_list)
#             result_list.append((text, np.mean(conf_list).tolist()))
#         return result_list

#     def __call__(self, preds, label=None, length=None, *args, **kwargs):
#         if len(preds) == 2:  # eval mode
#             text_pre, x = preds
#             b = text_pre.shape[1]
#             lenText = self.max_text_length
#             nsteps = self.max_text_length

#             if not isinstance(text_pre, paddle.Tensor):
#                 text_pre = paddle.to_tensor(text_pre, dtype='float32')

#             out_res = paddle.zeros(
#                 shape=[lenText, b, self.nclass], dtype=x.dtype)
#             out_length = paddle.zeros(shape=[b], dtype=x.dtype)
#             now_step = 0
#             for _ in range(nsteps):
#                 if 0 in out_length and now_step < nsteps:
#                     tmp_result = text_pre[now_step, :, :]
#                     out_res[now_step] = tmp_result
#                     tmp_result = tmp_result.topk(1)[1].squeeze(axis=1)
#                     for j in range(b):
#                         if out_length[j] == 0 and tmp_result[j] == 0:
#                             out_length[j] = now_step + 1
#                     now_step += 1
#             for j in range(0, b):
#                 if int(out_length[j]) == 0:
#                     out_length[j] = nsteps
#             start = 0
#             output = paddle.zeros(
#                 shape=[int(out_length.sum()), self.nclass], dtype=x.dtype)
#             for i in range(0, b):
#                 cur_length = int(out_length[i])
#                 output[start:start + cur_length] = out_res[0:cur_length, i, :]
#                 start += cur_length
#             net_out = output
#             length = out_length

#         else:  # train mode
#             net_out = preds[0]
#             length = length
#             net_out = paddle.concat([t[:l] for t, l in zip(net_out, length)])
#         text = []
#         if not isinstance(net_out, paddle.Tensor):
#             net_out = paddle.to_tensor(net_out, dtype='float32')
#         net_out = F.softmax(net_out, axis=1)
#         for i in range(0, length.shape[0]):
#             preds_idx = net_out[int(length[:i].sum()):int(length[:i].sum(
#             ) + length[i])].topk(1)[1][:, 0].tolist()
#             preds_text = ''.join([
#                 self.character[idx - 1]
#                 if idx > 0 and idx <= len(self.character) else ''
#                 for idx in preds_idx
#             ])
#             preds_prob = net_out[int(length[:i].sum()):int(length[:i].sum(
#             ) + length[i])].topk(1)[0][:, 0]
#             preds_prob = paddle.exp(
#                 paddle.log(preds_prob).sum() / (preds_prob.shape[0] + 1e-6))
#             text.append((preds_text, preds_prob.numpy()[0]))
#         if label is None:
#             return text
#         label = self.decode(label)
#         return text, label
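
A minimal sketch of CTC decoding with CTCLabelDecode, assuming the module is importable as rec_utils.rec_postprocess and using a hypothetical two-character dictionary file (the plugin itself downloads ppocr_keys_v1.txt): repeated indices collapse and blanks (index 0) are removed.

    import numpy as np
    from rec_utils.rec_postprocess import CTCLabelDecode

    with open('chars.txt', 'w') as f:  # hypothetical dictionary: one char per line
        f.write('h\ni\n')
    decoder = CTCLabelDecode(character_dict_path='chars.txt', use_space_char=False)
    # classes are ['blank', 'h', 'i'] after add_special_char prepends 'blank'
    preds = np.zeros((1, 5, 3), dtype=np.float32)
    for t, idx in enumerate([1, 1, 0, 2, 0]):  # h h blank i blank -> "hi"
        preds[0, t, idx] = 1.0
    print(decoder(preds))  # [('hi', 1.0)]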
@@ -1,2 +1,6 @@
 https://github.com/airockchip/rknn-toolkit2/raw/v2.0.0-beta0/rknn-toolkit-lite2/packages/rknn_toolkit_lite2-2.0.0b0-cp310-cp310-linux_aarch64.whl
 pillow==10.3.0
+six==1.16.0
+shapely== 2.0.4
+pyclipper==1.3.0.post5
+opencv-python-headless==4.9.0.80
@@ -2,8 +2,8 @@ import asyncio
 import concurrent.futures
 import os
 import platform
 import queue
 import threading
 import traceback
 from typing import Any, Coroutine, List, Tuple
 import urllib.request
@@ -14,9 +14,14 @@ from rknnlite.api import RKNNLite
 from predict import PredictPlugin, Prediction
 from predict.rectangle import Rectangle

+import scrypted_sdk
+from scrypted_sdk import DeviceProvider, ScryptedDeviceType, ScryptedInterface
+
 # for Rockchip-optimized models, the postprocessing is slightly different from the original models
 from .optimized.yolo import post_process, IMG_SIZE, CLASSES

+from .text_recognition import TEXT_RECOGNITION_NATIVE_ID, TextRecognition
+

 rknn_verbose = False
 lib_download = 'https://github.com/airockchip/rknn-toolkit2/raw/v2.0.0-beta0/rknpu2/runtime/Linux/librknn_api/aarch64/librknnrt.so'
@@ -53,13 +58,16 @@ def ensure_compatibility_and_get_cpu():
         raise


-class RKNNPlugin(PredictPlugin):
+class RKNNPlugin(PredictPlugin, DeviceProvider):
     labels = {i: CLASSES[i] for i in range(len(CLASSES))}
     rknn_runtimes: dict
     executor: concurrent.futures.ThreadPoolExecutor
+    text_recognition: TextRecognition = None
+    cpu: str

     def __init__(self, nativeId=None):
         super().__init__(nativeId)
-        cpu = ensure_compatibility_and_get_cpu()
+        self.cpu = ensure_compatibility_and_get_cpu()
         model = 'yolov6n'

         self.rknn_runtimes = {}
@@ -72,7 +80,7 @@ class RKNNPlugin(PredictPlugin):
         else:
             raise RuntimeError('librknnrt.so not found. Please download it from {} and place it at {}'.format(lib_download, lib_path))

-        model_download = model_download_tmpl.format(model, cpu)
+        model_download = model_download_tmpl.format(model, self.cpu)
         model_file = os.path.basename(model_download)
         model_path = self.downloadFile(model_download, model_file)
         print('Using model {}'.format(model_path))
@@ -101,7 +109,33 @@ class RKNNPlugin(PredictPlugin):
             self.rknn_runtimes[thread_name] = rknn
             print('RKNNLite runtime initialized on thread {}'.format(thread_name))

-        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3, initializer=executor_initializer)
+        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3, thread_name_prefix=type(self).__name__, initializer=executor_initializer)
+
+        asyncio.create_task(self.discoverRecognitionModels())
+
+    async def discoverRecognitionModels(self) -> None:
+        devices = [
+            {
+                "nativeId": TEXT_RECOGNITION_NATIVE_ID,
+                "name": "Rockchip NPU Text Recognition",
+                "type": ScryptedDeviceType.API.value,
+                "interfaces": [
+                    ScryptedInterface.ObjectDetection.value,
+                ],
+            }
+        ]
+        await scrypted_sdk.deviceManager.onDevicesChanged({
+            "devices": devices,
+        })
+
+    async def getDevice(self, nativeId: str) -> TextRecognition:
+        try:
+            if nativeId == TEXT_RECOGNITION_NATIVE_ID:
+                self.text_recognition = self.text_recognition or TextRecognition(nativeId, self.cpu)
+                return self.text_recognition
+        except:
+            traceback.print_exc()
+            raise
+
     def get_input_details(self) -> Tuple[int]:
         return (IMG_SIZE[0], IMG_SIZE[1], 3)
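
The hunk above and text_recognition.py below share one pattern worth isolating: each ThreadPoolExecutor worker initializes its own RKNNLite runtime and stashes it under its thread name, so inference never shares an NPU handle across threads. A minimal sketch, assuming a hypothetical model.rknn on disk:

    import concurrent.futures
    import threading
    from rknnlite.api import RKNNLite

    runtimes = {}

    def init_runtime():
        rknn = RKNNLite(verbose=False)
        if rknn.load_rknn('model.rknn') != 0 or rknn.init_runtime() != 0:
            raise RuntimeError('failed to initialize RKNNLite runtime')
        runtimes[threading.current_thread().name] = rknn

    executor = concurrent.futures.ThreadPoolExecutor(
        max_workers=3, thread_name_prefix='Sketch', initializer=init_runtime)

    def infer(inputs):
        # runs on a worker thread; look up the runtime that thread owns
        return runtimes[threading.current_thread().name].inference(inputs=inputs)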
plugins/rknn/src/rknn/text_recognition.py (new file, 264 lines)
@@ -0,0 +1,264 @@
import asyncio
import concurrent.futures
import math
import os
import threading
import traceback
from typing import Any, Callable, List

import numpy as np
from PIL import Image, ImageOps
from rknnlite.api import RKNNLite

from common.text import skew_image, crop_text, calculate_y_change
from predict import Prediction
from predict.rectangle import Rectangle
from predict.text_recognize import TextRecognition
import scrypted_sdk
from scrypted_sdk.types import ObjectsDetected, ObjectDetectionResult
import det_utils.operators
import det_utils.db_postprocess
import rec_utils.operators
import rec_utils.rec_postprocess


TEXT_RECOGNITION_NATIVE_ID = "textrecognition"
DET_IMG_SIZE = (480, 480)

RKNN_DET_PREPROCESS_CONFIG = [
    {
        'DetResizeForTest': {
            'image_shape': DET_IMG_SIZE
        }
    },
    {
        'NormalizeImage': {
            'std': [1., 1., 1.],
            'mean': [0., 0., 0.],
            'scale': '1.',
            'order': 'hwc'
        }
    }
]

RKNN_DET_POSTPROCESS_CONFIG = {
    'DBPostProcess': {
        'thresh': 0.3,
        'box_thresh': 0.6,
        'max_candidates': 1000,
        'unclip_ratio': 1.5,
        'use_dilation': False,
        'score_mode': 'fast',
    }
}

RKNN_REC_PREPROCESS_CONFIG = [
    {
        'NormalizeImage': {
            'std': [1, 1, 1],
            'mean': [0, 0, 0],
            'scale': '1./255.',
            'order': 'hwc'
        }
    }
]

RKNN_REC_POSTPROCESS_CONFIG = {
    'CTCLabelDecode': {
        "character_dict_path": None,  # will be replaced by RKNNDetection.__init__()
        "use_space_char": True
    }
}

rknn_verbose = False
model_download_tmpl = 'https://github.com/bjia56/scrypted-rknn/raw/main/models/{}_{}.rknn'
chardict_link = 'https://github.com/bjia56/scrypted-rknn/raw/main/models/ppocr_keys_v1.txt'


class RKNNText:
    model_path: str
    rknn_runtimes: dict
    executor: concurrent.futures.ThreadPoolExecutor
    preprocess_funcs: List[Callable]
    postprocess_func: Callable
    print: Callable

    def __init__(self, model_path, print) -> None:
        self.model_path = model_path
        self.rknn_runtimes = {}
        self.print = print

        if not self.model_path:
            raise ValueError('model_path is not set')

        def executor_initializer():
            thread_name = threading.current_thread().name
            rknn = RKNNLite(verbose=rknn_verbose)
            ret = rknn.load_rknn(self.model_path)
            if ret != 0:
                raise RuntimeError('Failed to load model: {}'.format(ret))

            ret = rknn.init_runtime()
            if ret != 0:
                raise RuntimeError('Failed to init runtime: {}'.format(ret))

            self.rknn_runtimes[thread_name] = rknn
            self.print('RKNNLite runtime initialized on thread {}'.format(thread_name))

        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3, thread_name_prefix=type(self).__name__, initializer=executor_initializer)

    def detect(self, img):
        def do_detect(img):
            model_input = img
            for p in self.preprocess_funcs:
                model_input = p(model_input)

            rknn = self.rknn_runtimes[threading.current_thread().name]
            output = rknn.inference(inputs=[np.expand_dims(model_input['image'], axis=0)])

            return self.postprocess_func(output, model_input['shape'], model_input['image'].shape)

        future = self.executor.submit(do_detect, {'image': img, 'shape': img.shape})
        return future


class RKNNDetection(RKNNText):
    db_preprocess = None
    det_postprocess = None

    def __init__(self, model_path, print):
        super().__init__(model_path, print)

        self.preprocess_funcs = []
        for item in RKNN_DET_PREPROCESS_CONFIG:
            for key in item:
                pclass = getattr(det_utils.operators, key)
                p = pclass(**item[key])
                self.preprocess_funcs.append(p)

        self.db_postprocess = det_utils.db_postprocess.DBPostProcess(**RKNN_DET_POSTPROCESS_CONFIG['DBPostProcess'])
        self.det_postprocess = det_utils.db_postprocess.DetPostProcess()

        def postprocess(output, model_shape, img_shape):
            preds = {'maps': output[0].astype(np.float32)}
            result = self.db_postprocess(preds, model_shape)
            return self.det_postprocess.filter_tag_det_res(result[0]['points'], img_shape)
        self.postprocess_func = postprocess


class RKNNRecognition(RKNNText):
    ctc_postprocess = None

    def __init__(self, model_path, print):
        super().__init__(model_path, print)

        self.preprocess_funcs = []
        for item in RKNN_REC_PREPROCESS_CONFIG:
            for key in item:
                pclass = getattr(rec_utils.operators, key)
                p = pclass(**item[key])
                self.preprocess_funcs.append(p)

        self.ctc_postprocess = rec_utils.rec_postprocess.CTCLabelDecode(**RKNN_REC_POSTPROCESS_CONFIG['CTCLabelDecode'])

        def postprocess(output, model_shape, img_shape):
            preds = output[0].astype(np.float32)
            output = self.ctc_postprocess(preds)
            return output
        self.postprocess_func = postprocess


async def prepare_text_result(d: ObjectDetectionResult, image: scrypted_sdk.Image, skew_angle: float):
    textImage = await crop_text(d, image)

    skew_height_change = calculate_y_change(d["boundingBox"][3], skew_angle)
    skew_height_change = math.floor(skew_height_change)
    textImage = skew_image(textImage, skew_angle)
    # crop skew_height_change from top
    if skew_height_change > 0:
        textImage = textImage.crop((0, 0, textImage.width, textImage.height - skew_height_change))
    elif skew_height_change < 0:
        textImage = textImage.crop((0, -skew_height_change, textImage.width, textImage.height))

    new_height = 48
    new_width = int(textImage.width * new_height / textImage.height)
    textImage = textImage.resize((new_width, new_height), resample=Image.LANCZOS).convert("L")

    new_width = 320
    # calculate padding dimensions
    padding = (0, 0, new_width - textImage.width, 0)
    # todo: clamp entire edge rather than just center
    edge_color = textImage.getpixel((textImage.width - 1, textImage.height // 2))
    # pad image
    textImage = ImageOps.expand(textImage, padding, fill=edge_color)
    # pil to numpy
    image_array = np.array(textImage)
    image_array = image_array.reshape(textImage.height, textImage.width, 1)
    image_tensor = image_array  # .transpose((2, 0, 1)) / 255

    # test normalize contrast
    # image_tensor = (image_tensor - np.min(image_tensor)) / (np.max(image_tensor) - np.min(image_tensor))

    image_tensor = (image_tensor - 0.5) / 0.5

    return image_tensor


class TextRecognition(TextRecognition):
    detection: RKNNDetection
    recognition: RKNNRecognition

    def __init__(self, nativeId=None, cpu=""):
        super().__init__(nativeId)

        model_download = model_download_tmpl.format("ppocrv4_det", cpu)
        model_file = os.path.basename(model_download)
        det_model_path = self.downloadFile(model_download, model_file)

        model_download = model_download_tmpl.format("ppocrv4_rec", cpu)
        model_file = os.path.basename(model_download)
        rec_model_path = self.downloadFile(model_download, model_file)

        chardict_file = os.path.basename(chardict_link)
        chardict_path = self.downloadFile(chardict_link, chardict_file)
        RKNN_REC_POSTPROCESS_CONFIG['CTCLabelDecode']['character_dict_path'] = chardict_path

        self.detection = RKNNDetection(det_model_path, lambda *args, **kwargs: self.print(*args, **kwargs))
        self.recognition = RKNNRecognition(rec_model_path, lambda *args, **kwargs: self.print(*args, **kwargs))
        self.inputheight = DET_IMG_SIZE[0]
        self.inputwidth = DET_IMG_SIZE[1]

    async def detect_once(self, input: Image, settings: Any, src_size, cvss) -> ObjectsDetected:
        detections = await asyncio.wrap_future(
            self.detection.detect(np.array(input)), loop=asyncio.get_event_loop()
        )

        # self.print(detections)

        predictions: List[Prediction] = []
        for box in detections:
            # self.print(box)
            tl, tr, br, bl = box
            l = min(tl[0], bl[0])
            t = min(tl[1], tr[1])
            r = max(tr[0], br[0])
            b = max(bl[1], br[1])

            pred = Prediction(0, 1, Rectangle(l, t, r, b))
            predictions.append(pred)

        return self.create_detection_result(predictions, src_size, cvss)

    async def setLabel(
        self, d: ObjectDetectionResult, image: scrypted_sdk.Image, skew_angle: float
    ):
        try:
            image_tensor = await prepare_text_result(d, image, skew_angle)
            preds = await asyncio.wrap_future(
                self.recognition.detect(image_tensor), loop=asyncio.get_event_loop()
            )
            # self.print("preds", preds)
            d["label"] = preds[0][0]
        except Exception as e:
            traceback.print_exc()
            pass
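A note on the pattern above: an RKNNLite handle is not shared across threads, so the pool's initializer creates one runtime per worker and stores it under the thread's name; each submitted task then fetches the runtime belonging to whichever thread executes it, and the async callers bridge the resulting concurrent future with asyncio.wrap_future. Below is a minimal, self-contained sketch of that pattern; load_runtime and the model path are stand-ins, not the real RKNN API.

import asyncio
import concurrent.futures
import threading

def load_runtime(model_path: str) -> dict:
    # Stand-in for a non-thread-safe runtime handle such as RKNNLite.
    return {"model": model_path}

runtimes = {}

def initializer(model_path: str) -> None:
    # Runs once in each worker thread before it accepts any task.
    runtimes[threading.current_thread().name] = load_runtime(model_path)

executor = concurrent.futures.ThreadPoolExecutor(
    max_workers=3,
    thread_name_prefix="npu",
    initializer=initializer,
    initargs=("model.rknn",),
)

def infer(frame: bytes):
    # Each task uses the runtime owned by the thread it lands on.
    runtime = runtimes[threading.current_thread().name]
    return runtime["model"], len(frame)

async def main() -> None:
    # Bridge the pool's concurrent.futures.Future into asyncio,
    # as detect_once does with asyncio.wrap_future above.
    print(await asyncio.wrap_future(executor.submit(infer, b"frame")))

asyncio.run(main())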
975 plugins/sip/package-lock.json generated
File diff suppressed because it is too large.
@@ -1,6 +1,6 @@
{
  "name": "@scrypted/sip",
  "version": "0.0.9",
  "version": "0.0.10",
  "scripts": {
    "scrypted-setup-project": "scrypted-setup-project",
    "prescrypted-setup-project": "scrypted-package-json",
@@ -32,17 +32,16 @@
    ]
  },
  "dependencies": {
    "@homebridge/camera-utils": "^2.0.4",
    "@slyoldfox/sip": "^0.0.6-1",
    "pick-port": "^1.0.0",
    "rxjs": "^7.8.1",
    "sdp": "^3.0.3",
    "stun": "^2.1.0",
    "uuid": "^8.3.2"
    "stun": "^2.1.0"
  },
  "devDependencies": {
    "@scrypted/common": "file:../../common",
    "@scrypted/sdk": "file:../../sdk",
    "@types/node": "^16.9.6",
    "@types/uuid": "^8.3.4",
    "cross-env": "^7.0.3"
  }
}
62 plugins/sip/src/port-utils.ts Normal file
@@ -0,0 +1,62 @@
import { Socket } from 'dgram'
import { AddressInfo } from 'net'
import { pickPort } from 'pick-port'

// Need to reserve ports in sequence because ffmpeg uses the next port up by default. If it's taken, ffmpeg will error
export async function reservePorts({
    count = 1,
    type = 'udp',
    attemptNumber = 0,
}: {
    count?: number
    type?: 'udp' | 'tcp'
    attemptNumber?: number
} = {}): Promise<number[]> {
    if (attemptNumber > 100) {
        throw new Error('Failed to reserve ports after 100 tries')
    }

    const pickPortOptions = {
        type,
        reserveTimeout: 15, // 15 seconds is max setup time for HomeKit streams, so the port should be in use by then
    },
        port = await pickPort(pickPortOptions),
        ports = [port],
        tryAgain = () => {
            return reservePorts({
                count,
                type,
                attemptNumber: attemptNumber + 1,
            })
        }

    for (let i = 1; i < count; i++) {
        try {
            const targetConsecutivePort = port + i,
                openPort = await pickPort({
                    ...pickPortOptions,
                    minPort: targetConsecutivePort,
                    maxPort: targetConsecutivePort,
                })

            ports.push(openPort)
        } catch (_) {
            // can't reserve next port, bail and get another set
            return tryAgain()
        }
    }

    return ports
}

export function bindToPort(socket: Socket) {
    return new Promise<number>((resolve, reject) => {
        socket.on('error', reject)

        // 0 means select a random open port
        socket.bind(0, () => {
            const { port } = socket.address() as AddressInfo
            resolve(port)
        })
    })
}
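The loop above reserves one free port, then verifies that each consecutive port is also free, restarting with a fresh base if any probe fails; this matters because ffmpeg assumes the next port up from the one it is given. A rough Python restatement of the same algorithm with plain sockets follows; reserve_consecutive_udp_ports is an illustrative name, and unlike pick-port's reserveTimeout the probe sockets here are simply closed before returning, so a small rebinding race remains.

import socket

def reserve_consecutive_udp_ports(count: int = 2, attempts: int = 100) -> list[int]:
    # Mirrors the TypeScript above: bind port 0 to get a free base port, then
    # probe base+1 .. base+count-1; if any one is taken, start over.
    for _ in range(attempts):
        probes = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
        try:
            probes[0].bind(('127.0.0.1', 0))
            base = probes[0].getsockname()[1]
            try:
                for i in range(1, count):
                    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    probes.append(s)
                    s.bind(('127.0.0.1', base + i))
            except OSError:
                continue  # a consecutive port was taken; try a new base
            return list(range(base, base + count))
        finally:
            for s in probes:
                s.close()
    raise RuntimeError('Failed to reserve ports after {} tries'.format(attempts))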
@@ -1,10 +1,14 @@
// by @dgrief from @homebridge/camera-utils
import { SrtpOptions } from '@homebridge/camera-utils'
import dgram from 'dgram'
const stun = require('stun')

const stunMagicCookie = 0x2112a442 // https://tools.ietf.org/html/rfc5389#section-6

export interface SrtpOptions {
    srtpKey: Buffer
    srtpSalt: Buffer
}

export interface RtpStreamOptions extends SrtpOptions {
    port: number
    rtcpPort: number
@@ -1,4 +1,4 @@
import { reservePorts } from '@homebridge/camera-utils';
import { reservePorts } from './port-utils';
import { createBindUdp, createBindZero } from '@scrypted/common/src/listen-cluster';
import dgram from 'dgram';
import { ReplaySubject, timer } from 'rxjs';
@@ -2,7 +2,7 @@ import { noop, Subject } from 'rxjs'
import { randomInteger, randomString } from './util'
import { RtpDescription, RtpOptions, RtpStreamDescription } from './rtp-utils'
import { decodeSrtpOptions } from '../../ring/src/srtp-utils'
import { stringify, stringifyUri } from '@slyoldfox/sip'
import { stringify } from '@slyoldfox/sip'
import { timeoutPromise } from '@scrypted/common/src/promise-utils';
import sdp from 'sdp'
@@ -181,6 +181,9 @@ export class SipManager {
            // },
            ws: false,
            logger: {
                error: function(e) {
                    if( sipOptions.debugSip ) console.error(e)
                },
                recv: function(m, remote) {
                    if( (m.status == '200' || m.method === 'INVITE' ) && m.headers && m.headers.cseq && m.headers.cseq.method === 'INVITE' && m.headers.contact && m.headers.contact[0] ) {
                        // ACK for INVITE and BYE must use the registrar contact uri
@@ -447,10 +450,6 @@ export class SipManager {

            return parseRtpDescription(this.console, incomingCallRequest)
        } else {
            if( this.sipOptions.to.toLocaleLowerCase().indexOf('c300x') >= 0 ) {
                // Needed for bt_answering_machine (bticino specific)
                audio.unshift('a=DEVADDR:20')
            }
            let inviteResponse = await this.request({
                method: 'INVITE',
                headers: {
@@ -1,13 +1,7 @@
import { v4 as generateRandomUuid, v5 as generateUuidFromNamespace } from 'uuid'

const uuidNamespace = 'e53ffdc0-e91d-4ce1-bec2-df939d94739d'
const crypto = require('crypto');

export function generateUuid(seed?: string) {
    if (seed) {
        return generateUuidFromNamespace(seed, uuidNamespace)
    }

    return generateRandomUuid()
    return crypto.randomUUID();
}

export function randomInteger() {
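The change above keeps the seeded, namespaced (v5) path but swaps the random fallback from the uuid package to Node's built-in crypto.randomUUID(). Python's standard uuid module exposes the same pair of behaviors; the sketch below uses the namespace constant from the diff purely for illustration.

import uuid

ns = uuid.UUID('e53ffdc0-e91d-4ce1-bec2-df939d94739d')

# Seeded: deterministic v5 UUID, same output for the same seed every run.
print(uuid.uuid5(ns, 'camera-1'))
print(uuid.uuid5(ns, 'camera-1'))

# Unseeded: random v4 UUID, the equivalent of crypto.randomUUID().
print(uuid.uuid4())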
4 plugins/snapshot/package-lock.json generated
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/snapshot",
  "version": "0.2.50",
  "version": "0.2.52",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/snapshot",
      "version": "0.2.50",
      "version": "0.2.52",
      "dependencies": {
        "@types/node": "^20.10.6",
        "sharp": "^0.33.1",
@@ -1,6 +1,6 @@
{
  "name": "@scrypted/snapshot",
  "version": "0.2.50",
  "version": "0.2.52",
  "description": "Snapshot Plugin for Scrypted",
  "scripts": {
    "scrypted-setup-project": "scrypted-setup-project",
@@ -271,10 +271,6 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {
    }

    async takePictureRaw(options?: RequestPictureOptions): Promise<Buffer> {
        let rawPicturePromise: Promise<{
            picture: Buffer;
            pictureTime: number;
        }>;
        const eventSnapshot = options?.reason === 'event';
        const periodicSnapshot = options?.reason === 'periodic';
@@ -282,50 +278,61 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {
        if (this.currentPictureTime < Date.now() - 1 * 60 * 60 * 1000)
            this.currentPicture = undefined;

        const allowedSnapshotStaleness = eventSnapshot ? 0 : periodicSnapshot ? 20000 : 10000;

        let needRefresh = true;
        if (this.currentPicture && this.currentPictureTime > Date.now() - allowedSnapshotStaleness) {
            this.debugConsole?.log('Using cached snapshot for', options?.reason);
            rawPicturePromise = Promise.resolve({
                picture: this.currentPicture,
                pictureTime: this.currentPictureTime,
            });
            needRefresh = this.currentPictureTime < Date.now() - allowedSnapshotStaleness / 2;
        }

        if (needRefresh) {
            const debounced = this.snapshotDebouncer({
                id: options?.id,
                reason: options?.reason,
            }, eventSnapshot ? 0 : 10000, async () => {
                const snapshotTimer = Date.now();
                let picture = await this.takePictureInternal();
                picture = await this.cropAndScale(picture);
                this.clearCachedPictures();
                const pictureTime = Date.now();
                this.currentPicture = picture;
                this.currentPictureTime = pictureTime;
                this.lastAvailablePicture = picture;
                this.debugConsole?.debug(`Periodic snapshot took ${(this.currentPictureTime - snapshotTimer) / 1000} seconds to retrieve.`)
                return {
                    picture,
                    pictureTime,
                };
            });
            debounced.catch(() => { });

            rawPicturePromise ||= debounced;
        }
        // always grab/debounce a snapshot
        // event snapshots are special and should immediately expire.
        // other snapshots may be debounced for 4s.
        const debounced = this.snapshotDebouncer({
            id: options?.id,
            type: 'source',
            event: options?.reason === 'event',
        }, eventSnapshot ? 0 : 4000, async () => {
            const snapshotTimer = Date.now();
            let picture = await this.takePictureInternal();
            picture = await this.cropAndScale(picture);
            this.clearCachedPictures();
            const pictureTime = Date.now();
            this.currentPicture = picture;
            this.currentPictureTime = pictureTime;
            this.lastAvailablePicture = picture;
            this.debugConsole?.debug(`Periodic snapshot took ${(this.currentPictureTime - snapshotTimer) / 1000} seconds to retrieve.`)
            return {
                picture,
                pictureTime,
            };
        });
        debounced.catch(() => { });

        // prevent this from expiring
        let availablePicture = this.currentPicture;
        let availablePictureTime = this.currentPictureTime;

        let rawPicture: Awaited<typeof rawPicturePromise>;
        let rawPicture: Awaited<typeof debounced>;
        try {
            const pictureTimeout = options?.timeout || (periodicSnapshot && availablePicture ? 1000 : 10000) || 10000;
            rawPicture = await timeoutPromise(pictureTimeout, rawPicturePromise);
            let pictureTimeout = options?.timeout;
            if (!pictureTimeout) {
                // determine a fetch timeout based on the reason and staleness
                const allowedSnapshotStaleness = eventSnapshot ? 0 : periodicSnapshot ? 20000 : 10000;
                if (!availablePicture) {
                    // none available so wait a while
                    pictureTimeout = 10000;
                }
                else {
                    if (availablePictureTime > Date.now() - 3000) {
                        // very recent, don't wait for too long
                        pictureTimeout = 1000;
                    }
                    else if (availablePictureTime > Date.now() - allowedSnapshotStaleness) {
                        // fairly recent, so give it a little time to get a fresh one
                        // idr interval is typically 4000 for reference
                        pictureTimeout = 3000;
                    }
                    else {
                        // stale so wait a while
                        pictureTimeout = 10000;
                    }
                }
            }
            rawPicture = await timeoutPromise(pictureTimeout, debounced);
        }
        catch (e) {
            // a best effort was made to get a recent snapshot from cache or from a camera request,
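The replacement logic above derives the fetch timeout from the snapshot reason and the age of the cached picture instead of a flat value. The same decision table, restated compactly in Python with the millisecond values from the diff (the function name is illustrative):

def pick_snapshot_timeout(event_snapshot: bool, periodic_snapshot: bool,
                          available_picture_time: float | None, now: float) -> int:
    # Staleness allowance: events demand a fresh frame, periodic snapshots
    # tolerate 20s, everything else 10s (all times in milliseconds).
    allowed_staleness = 0 if event_snapshot else (20000 if periodic_snapshot else 10000)
    if available_picture_time is None:
        return 10000  # nothing cached, so wait a while
    if available_picture_time > now - 3000:
        return 1000   # cache is very recent, don't wait long for a fresh one
    if available_picture_time > now - allowed_staleness:
        return 3000   # fairly recent; IDR interval is typically 4000 for reference
    return 10000      # stale, so wait a while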
@@ -336,7 +343,11 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {
            if (eventSnapshot)
                throw e;

            availablePicture = this.currentPicture || availablePicture;
            if (this.currentPicture) {
                // use the current picture if it is still available as it may be newer.
                availablePicture = this.currentPicture;
                availablePictureTime = this.currentPictureTime;
            }

            if (!availablePicture)
                return this.createErrorImage(e);
@@ -358,8 +369,8 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {

        try {
            const key = {
                type: 'resize',
                pictureTime: rawPicture.pictureTime,
                reason: options?.reason,
                needSoftwareResize: true,
                picture: options.picture,
            };
4 plugins/tapo/package-lock.json generated
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/tapo",
  "version": "0.0.13",
  "version": "0.0.16",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/tapo",
      "version": "0.0.13",
      "version": "0.0.16",
      "dependencies": {
        "@scrypted/common": "file:../../common",
        "@scrypted/sdk": "file:../../sdk",
@@ -1,6 +1,6 @@
{
  "name": "@scrypted/tapo",
  "version": "0.0.13",
  "version": "0.0.16",
  "description": "Tapo Camera Plugin for Scrypted",
  "scripts": {
    "scrypted-setup-project": "scrypted-setup-project",
4 plugins/tensorflow-lite/package-lock.json generated
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/tensorflow-lite",
  "version": "0.1.59",
  "version": "0.1.60",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/tensorflow-lite",
      "version": "0.1.59",
      "version": "0.1.60",
      "devDependencies": {
        "@scrypted/sdk": "file:../../sdk"
      }
@@ -53,5 +53,5 @@
  "devDependencies": {
    "@scrypted/sdk": "file:../../sdk"
  },
  "version": "0.1.59"
  "version": "0.1.60"
}
@@ -73,7 +73,6 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
        if mediaObject.mimeType == ScryptedMimeTypes.Image.value:
            image = await scrypted_sdk.sdk.connectRPCObject(mediaObject)
        else:
            print('non image provided')
            image = await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, ScryptedMimeTypes.Image.value)

        return await self.run_detection_image(image, session)
@@ -1,51 +1,27 @@
from __future__ import annotations

import asyncio
from asyncio import Future
import base64
import concurrent.futures
import os
from typing import Any, Tuple, List
import traceback
from asyncio import Future
from typing import Any, List, Tuple

import numpy as np
# import Quartz
import scrypted_sdk
# from Foundation import NSData, NSMakeSize
from PIL import Image
from scrypted_sdk import (
    Setting,
    SettingValue,
    ObjectDetectionSession,
    ObjectsDetected,
    ObjectDetectionResult,
)
import traceback
from scrypted_sdk import (ObjectDetectionResult, ObjectDetectionSession,
                          ObjectsDetected)

# import Vision
from predict import PredictPlugin
from common import yolo
from common.text import prepare_text_result, process_text_result
from predict import PredictPlugin


def euclidean_distance(arr1, arr2):
    return np.linalg.norm(arr1 - arr2)


def cosine_similarity(vector_a, vector_b):
    dot_product = np.dot(vector_a, vector_b)
    norm_a = np.linalg.norm(vector_a)
    norm_b = np.linalg.norm(vector_b)
    similarity = dot_product / (norm_a * norm_b)
    return similarity


predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "Recognize")

class FaceRecognizeDetection(PredictPlugin):
    def __init__(self, nativeId: str | None = None):
        super().__init__(nativeId=nativeId)

        self.inputheight = 640
        self.inputwidth = 640
        self.inputheight = 320
        self.inputwidth = 320

        self.labels = {
            0: "face",
@@ -55,21 +31,12 @@ class FaceRecognizeDetection(PredictPlugin):
        self.loop = asyncio.get_event_loop()
        self.minThreshold = 0.7

        self.detectModel = self.downloadModel("scrypted_yolov9c_flt")
        self.textModel = self.downloadModel("vgg_english_g2")
        self.detectModel = self.downloadModel("scrypted_yolov8n_flt_320")
        self.faceModel = self.downloadModel("inception_resnet_v1")

    def downloadModel(self, model: str):
        pass

    async def getSettings(self) -> list[Setting]:
        pass

    async def putSetting(self, key: str, value: SettingValue):
        self.storage.setItem(key, value)
        await self.onDeviceEvent(scrypted_sdk.ScryptedInterface.Settings.value, None)
        await scrypted_sdk.deviceManager.requestRestart()

    # width, height, channels
    def get_input_details(self) -> Tuple[int, int, int]:
        return (self.inputwidth, self.inputheight, 3)
@@ -81,9 +48,7 @@ class FaceRecognizeDetection(PredictPlugin):
        return "rgb"

    async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
        results = await asyncio.get_event_loop().run_in_executor(
            predictExecutor, lambda: self.predictDetectModel(input)
        )
        results = await self.predictDetectModel(input)
        objs = yolo.parse_yolov9(results)
        ret = self.create_detection_result(objs, src_size, cvss)
        return ret
@@ -112,10 +77,7 @@ class FaceRecognizeDetection(PredictPlugin):
        processed_tensor = (image_tensor - 127.5) / 128.0
        processed_tensor = np.expand_dims(processed_tensor, axis=0)

        output = await asyncio.get_event_loop().run_in_executor(
            predictExecutor,
            lambda: self.predictFaceModel(processed_tensor)
        )
        output = await self.predictFaceModel(processed_tensor)

        b = output.tobytes()
        embedding = base64.b64encode(b).decode("utf-8")
@@ -124,30 +86,13 @@ class FaceRecognizeDetection(PredictPlugin):

            traceback.print_exc()
            pass

    def predictTextModel(self, input):

    async def predictDetectModel(self, input: Image.Image):
        pass

    def predictDetectModel(self, input):
    async def predictFaceModel(self, prepareTensor):
        pass

    def predictFaceModel(self, input):
        pass

    async def setLabel(self, d: ObjectDetectionResult, image: scrypted_sdk.Image):
        try:
            image_tensor = await prepare_text_result(d, image)
            preds = await asyncio.get_event_loop().run_in_executor(
                predictExecutor,
                lambda: self.predictTextModel(image_tensor),
            )
            d['label'] = process_text_result(preds)

        except Exception as e:
            traceback.print_exc()
            pass

    async def run_detection_image(
        self, image: scrypted_sdk.Image, detection_session: ObjectDetectionSession
    ) -> ObjectsDetected:
@@ -206,31 +151,8 @@ class FaceRecognizeDetection(PredictPlugin):
        for d in ret["detections"]:
            if d["className"] == "face":
                futures.append(asyncio.ensure_future(self.setEmbedding(d, image)))
            # elif d["className"] == "plate":
            #     futures.append(asyncio.ensure_future(self.setLabel(d, image)))
            # elif d['className'] == 'text':
            #     futures.append(asyncio.ensure_future(self.setLabel(d, image)))

        if len(futures):
            await asyncio.wait(futures)

        last = None
        for d in ret['detections']:
            if d["className"] != "face":
                continue
            check = d.get("embedding")
            if check is None:
                continue
            # decode base64 string check
            embedding = base64.b64decode(check)
            embedding = np.frombuffer(embedding, dtype=np.float32)
            if last is None:
                last = embedding
                continue
            # convert to numpy float32 arrays
            similarity = cosine_similarity(last, embedding)
            print('similarity', similarity)
            last = embedding

        return ret
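The removed debug loop above walked consecutive face embeddings and printed their cosine similarity. As a quick worked example of what that metric reports: identical vectors score 1.0 and orthogonal vectors score 0.0, so scores near 1.0 suggest the same face.

import numpy as np

def cosine_similarity(vector_a, vector_b):
    return np.dot(vector_a, vector_b) / (np.linalg.norm(vector_a) * np.linalg.norm(vector_b))

a = np.array([1.0, 0.0, 1.0], dtype=np.float32)
b = np.array([1.0, 0.0, 1.0], dtype=np.float32)
c = np.array([0.0, 1.0, 0.0], dtype=np.float32)

print(cosine_similarity(a, b))  # 1.0: identical embeddings
print(cosine_similarity(a, c))  # 0.0: orthogonal embeddings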
@@ -41,10 +41,10 @@ class TextRecognition(PredictPlugin):
    def downloadModel(self, model: str):
        pass

    def predictDetectModel(self, input):
    async def predictDetectModel(self, input: np.ndarray):
        pass

    def predictTextModel(self, input):
    async def predictTextModel(self, input: np.ndarray):
        pass

    async def detect_once(
@@ -56,9 +56,7 @@ class TextRecognition(PredictPlugin):
        # add extra dimension to tensor
        image_tensor = np.expand_dims(image_tensor, axis=0)

        y = await asyncio.get_event_loop().run_in_executor(
            predictExecutor, lambda: self.predictDetectModel(image_tensor)
        )
        y = await self.predictDetectModel(image_tensor)

        estimate_num_chars = False
        ratio_h = ratio_w = 1
@@ -156,12 +154,8 @@ class TextRecognition(PredictPlugin):
        self, d: ObjectDetectionResult, image: scrypted_sdk.Image, skew_angle: float
    ):
        try:
            image_tensor = await prepare_text_result(d, image, skew_angle)
            preds = await asyncio.get_event_loop().run_in_executor(
                predictExecutor,
                lambda: self.predictTextModel(image_tensor),
            )
            preds = await self.predictTextModel(image_tensor)
            d["label"] = process_text_result(preds)

        except Exception as e:
@@ -1,6 +1,7 @@
from __future__ import annotations

import json
import threading

from PIL import Image
from pycoral.adapters import detect
@@ -121,7 +122,8 @@ class TensorFlowLitePlugin(
        labels_contents = open(labelsFile, "r").read()
        self.labels = parse_label_contents(labels_contents)

        self.interpreters = queue.Queue()
        self.interpreters = {}
        available_interpreters = []
        self.interpreter_count = 0

        def downloadModel():
@@ -145,7 +147,7 @@ class TensorFlowLitePlugin(
                    "shape"
                ]
                self.input_details = int(width), int(height), int(channels)
                self.interpreters.put(interpreter)
                available_interpreters.append(interpreter)
                self.interpreter_count = self.interpreter_count + 1
                print("added tpu %s" % (edge_tpu))
            except Exception as e:
@@ -165,12 +167,20 @@ class TensorFlowLitePlugin(
            interpreter.allocate_tensors()
            _, height, width, channels = interpreter.get_input_details()[0]["shape"]
            self.input_details = int(width), int(height), int(channels)
            self.interpreters.put(interpreter)
            available_interpreters.append(interpreter)
            self.interpreter_count = self.interpreter_count + 1

        print(modelFile, labelsFile)

        def executor_initializer():
            thread_name = threading.current_thread().name
            interpreter = available_interpreters.pop()
            self.interpreters[thread_name] = interpreter
            print('Interpreter initialized on thread {}'.format(thread_name))

        self.executor = concurrent.futures.ThreadPoolExecutor(
            initializer=executor_initializer,
            max_workers=self.interpreter_count,
            thread_name_prefix="tflite",
        )
@@ -208,53 +218,50 @@ class TensorFlowLitePlugin(

    async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
        def predict():
            interpreter = self.interpreters.get()
            try:
                if self.yolo:
                    tensor_index = input_details(interpreter, "index")
            interpreter = self.interpreters[threading.current_thread().name]
            if self.yolo:
                tensor_index = input_details(interpreter, "index")

                    im = np.stack([input])
                    i = interpreter.get_input_details()[0]
                    if i["dtype"] == np.int8:
                        scale, zero_point = i["quantization"]
                        if scale == 0.003986024297773838 and zero_point == -128:
                            # fast path for quantization 1/255 = 0.003986024297773838
                            im = im.view(np.int8)
                            im -= 128
                        else:
                            im = im.astype(np.float32) / (255.0 * scale)
                            im = (im + zero_point).astype(np.int8)  # de-scale
                im = np.stack([input])
                i = interpreter.get_input_details()[0]
                if i["dtype"] == np.int8:
                    scale, zero_point = i["quantization"]
                    if scale == 0.003986024297773838 and zero_point == -128:
                        # fast path for quantization 1/255 = 0.003986024297773838
                        im = im.view(np.int8)
                        im -= 128
                    else:
                        # this code path is unused.
                        im = im.astype(np.float32) / 255.0
                interpreter.set_tensor(tensor_index, im)
                interpreter.invoke()
                output_details = interpreter.get_output_details()
                output = output_details[0]
                x = interpreter.get_tensor(output["index"])
                input_scale = self.get_input_details()[0]
                if x.dtype == np.int8:
                    scale, zero_point = output["quantization"]
                    threshold = yolo.defaultThreshold / scale + zero_point
                    combined_scale = scale * input_scale
                    objs = yolo.parse_yolov9(
                        x[0],
                        threshold,
                        scale=lambda v: (v - zero_point) * combined_scale,
                        confidence_scale=lambda v: (v - zero_point) * scale,
                    )
                else:
                    # this code path is unused.
                    objs = yolo.parse_yolov9(x[0], scale=lambda v: v * input_scale)
                        im = im.astype(np.float32) / (255.0 * scale)
                        im = (im + zero_point).astype(np.int8)  # de-scale
            else:
                tflite_common.set_input(interpreter, input)
                interpreter.invoke()
                objs = detect.get_objects(
                    interpreter, score_threshold=0.2, image_scale=(1, 1)
                    # this code path is unused.
                    im = im.astype(np.float32) / 255.0
                interpreter.set_tensor(tensor_index, im)
                interpreter.invoke()
                output_details = interpreter.get_output_details()
                output = output_details[0]
                x = interpreter.get_tensor(output["index"])
                input_scale = self.get_input_details()[0]
                if x.dtype == np.int8:
                    scale, zero_point = output["quantization"]
                    threshold = yolo.defaultThreshold / scale + zero_point
                    combined_scale = scale * input_scale
                    objs = yolo.parse_yolov9(
                        x[0],
                        threshold,
                        scale=lambda v: (v - zero_point) * combined_scale,
                        confidence_scale=lambda v: (v - zero_point) * scale,
                    )
                return objs
            finally:
                self.interpreters.put(interpreter)
                else:
                    # this code path is unused.
                    objs = yolo.parse_yolov9(x[0], scale=lambda v: v * input_scale)
            else:
                tflite_common.set_input(interpreter, input)
                interpreter.invoke()
                objs = detect.get_objects(
                    interpreter, score_threshold=0.2, image_scale=(1, 1)
                )
            return objs

        objs = await asyncio.get_event_loop().run_in_executor(self.executor, predict)
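The int8 fast path above is worth a worked check: when the model's input quantization is scale 1/255 with zero point -128, the mapping q = p/255/scale + zero_point collapses to q = p - 128, so a uint8 frame can be shifted and reinterpreted as int8 without a float round trip. A small verification sketch (equivalent to the diff's in-place `im.view(np.int8); im -= 128`, written here with a typed scalar so it also runs under NumPy 2's stricter scalar rules):

import numpy as np

im = np.array([[0, 127, 255]], dtype=np.uint8)  # pixel values as stacked above
scale, zero_point = 1 / 255.0, -128

# Fast path: add 128 with uint8 wraparound, then reinterpret the bytes as
# int8 (255 -> 127, 127 -> -1, 0 -> -128), i.e. q = p - 128 mod 256.
fast = (im + np.uint8(128)).view(np.int8)

# General path, as the slow branch computes it.
general = (im.astype(np.float32) / (255.0 * scale) + zero_point).astype(np.int8)

assert np.array_equal(fast, general)
print(fast)  # [[-128   -1  127]]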
26 plugins/unifi-protect/package-lock.json generated
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/unifi-protect",
  "version": "0.0.146",
  "version": "0.0.149",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/unifi-protect",
      "version": "0.0.146",
      "version": "0.0.149",
      "license": "Apache",
      "dependencies": {
        "@koush/unifi-protect": "file:../../external/unifi-protect",
@@ -27,12 +27,12 @@
      "dependencies": {
        "@scrypted/sdk": "file:../sdk",
        "@scrypted/server": "file:../server",
        "http-auth-utils": "^3.0.2",
        "node-fetch-commonjs": "^3.1.1",
        "typescript": "^4.4.3"
        "http-auth-utils": "^5.0.1",
        "typescript": "^5.3.3"
      },
      "devDependencies": {
        "@types/node": "^16.9.0"
        "@types/node": "^20.11.0",
        "ts-node": "^10.9.2"
      }
    },
    "../../external/unifi-protect": {
@@ -61,12 +61,12 @@
    },
    "../../sdk": {
      "name": "@scrypted/sdk",
      "version": "0.2.103",
      "version": "0.3.31",
      "license": "ISC",
      "dependencies": {
        "@babel/preset-typescript": "^7.18.6",
        "adm-zip": "^0.4.13",
        "axios": "^0.21.4",
        "axios": "^1.6.5",
        "babel-loader": "^9.1.0",
        "babel-plugin-const-enum": "^1.1.0",
        "esbuild": "^0.15.9",
@@ -260,10 +260,10 @@
      "requires": {
        "@scrypted/sdk": "file:../sdk",
        "@scrypted/server": "file:../server",
        "@types/node": "^16.9.0",
        "http-auth-utils": "^3.0.2",
        "node-fetch-commonjs": "^3.1.1",
        "typescript": "^4.4.3"
        "@types/node": "^20.11.0",
        "http-auth-utils": "^5.0.1",
        "ts-node": "^10.9.2",
        "typescript": "^5.3.3"
      }
    },
    "@scrypted/sdk": {
@@ -273,7 +273,7 @@
        "@types/node": "^18.11.18",
        "@types/stringify-object": "^4.0.0",
        "adm-zip": "^0.4.13",
        "axios": "^0.21.4",
        "axios": "^1.6.5",
        "babel-loader": "^9.1.0",
        "babel-plugin-const-enum": "^1.1.0",
        "esbuild": "^0.15.9",
@@ -1,6 +1,6 @@
{
  "name": "@scrypted/unifi-protect",
  "version": "0.0.146",
  "version": "0.0.149",
  "description": "Unifi Protect Plugin for Scrypted",
  "author": "Scrypted",
  "license": "Apache",
@@ -157,10 +157,11 @@ export class UnifiProtect extends ScryptedDeviceBase implements Settings, Device
        const payload = updatePacket.payload as ProtectNvrUpdatePayloadEventAdd;
        if (!payload.camera)
            return;
        const unifiCamera = this.cameras.get(payload.camera);
        const nativeId = this.getNativeId({ id: payload.camera }, false);
        const unifiCamera = this.cameras.get(nativeId);

        if (!unifiCamera) {
            this.console.log('unknown device event, sync needed?', payload.camera);
            this.console.log('unknown device event, sync needed?', payload);
            return;
        }
@@ -195,7 +196,7 @@ export class UnifiProtect extends ScryptedDeviceBase implements Settings, Device
        //     id: '661d86bf03e69c03e408d62a',
        //     modelKey: 'event'
        // }

        if (payload.type === 'smartDetectZone' || payload.type === 'smartDetectLine') {
            unifiCamera.resetDetectionTimeout();
@@ -602,7 +603,7 @@ export class UnifiProtect extends ScryptedDeviceBase implements Settings, Device
        return this.storageSettings.values.idMaps.nativeId?.[nativeId] || nativeId;
    }

    getNativeId(device: any, update: boolean) {
    getNativeId(device: { id?: string, mac?: string; anonymousDeviceId?: string }, update: boolean) {
        const { id, mac, anonymousDeviceId } = device;
        const idMaps = this.storageSettings.values.idMaps;
4 plugins/webrtc/package-lock.json generated
@@ -1,12 +1,12 @@
{
  "name": "@scrypted/webrtc",
  "version": "0.2.23",
  "version": "0.2.24",
  "lockfileVersion": 2,
  "requires": true,
  "packages": {
    "": {
      "name": "@scrypted/webrtc",
      "version": "0.2.23",
      "version": "0.2.24",
      "dependencies": {
        "@scrypted/common": "file:../../common",
        "@scrypted/sdk": "file:../../sdk",
@@ -1,6 +1,6 @@
{
  "name": "@scrypted/webrtc",
  "version": "0.2.23",
  "version": "0.2.24",
  "scripts": {
    "scrypted-setup-project": "scrypted-setup-project",
    "prescrypted-setup-project": "scrypted-package-json",
@@ -68,7 +68,7 @@ async function setupRtspClient(console: Console, rtspClient: RtspClient, channel
                path: section.control,
                onRtp: (rtspHeader, rtp) => deliver(rtp),
            });
            console.log('rtsp/udp', section.codec, result);
            // console.log('rtsp/udp', section.codec, result);
            return false;
        }
    }
@@ -82,7 +82,7 @@ async function setupRtspClient(console: Console, rtspClient: RtspClient, channel
                path: section.control,
                onRtp: (rtspHeader, rtp) => deliver(rtp),
            });
            console.log('rtsp/tcp', section.codec);
            // console.log('rtsp/tcp', section.codec);
            return true;
        }
Some files were not shown because too many files have changed in this diff.