Mirror of https://github.com/koush/scrypted.git (synced 2026-02-03 22:23:27 +00:00)

Compare commits: 69 commits
Commits in this comparison:

ea25682488, 06e25e6a16, 10847ef3f2, 78184390ac, 9a0c88ac61, 646dd3613c, ab87abb859, 5ce1a2b406, 1abda3b425, c759becac6,
b3a16c0000, 0163a804cd, ab157b16f1, 905a9aec21, 8e63dcdb15, 05cad811e8, 69a3e1138b, 9c9e29068b, b8bb6dfa61, 809956a2a4,
0be72a70a5, 9d03566246, 7c023dbdf6, 1f2187fd6a, 83b60b7b2b, edfdd5c1a8, cdd350f52b, 1594364194, 8dac20ed1c, 20beacb746,
ac51fa6355, 05a60831e6, dd13fee049, 31fd833873, a0e5dd4c89, 215daf5af7, a82972d967, 6fd6c7af14, 6d1cf5d3c1, 0cfef48954,
e9722d3875, fa8d17bec9, d69ec69038, 106fc1bf58, 4b055f55e1, 3a70625308, 7a382a8eba, 6d520dc4b2, 40c7132ec0, 4d2a038f19,
a8bfdb6610, 9817b0144e, f662bd7de4, de52cec190, 9a8e48e3c4, 0560d857c1, 4ee72cd074, 7120ff430f, 167c66f8d6, 4d98ccf86b,
ff2d1d5f97, ebe19532fc, 1294fc291a, 39c637a95f, 2fb6331e7b, e7fd88bf2a, 96455dc38e, 4301911e86, 1ddbe2fac8
.github/workflows/docker-common.yml (vendored, 97 lines changed)
```diff
@@ -7,13 +7,10 @@ jobs:
build:
name: Push Docker image to Docker Hub
runs-on: self-hosted
# runs-on: ubuntu-latest
env:
NODE_VERSION: '20'
strategy:
matrix:
NODE_VERSION: [
# "18",
"20"
]
BASE: ["jammy"]
FLAVOR: ["full", "lite"]
steps:
@@ -23,12 +20,26 @@ jobs:
- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up SSH
uses: MrSquaare/ssh-setup-action@v2
with:
host: ${{ secrets.DOCKER_SSH_HOST_AMD64 }}
private-key: ${{ secrets.DOCKER_SSH_PRIVATE_KEY }}

- name: Set up SSH
uses: MrSquaare/ssh-setup-action@v2
with:
host: ${{ secrets.DOCKER_SSH_HOST_ARM64 }}
private-key: ${{ secrets.DOCKER_SSH_PRIVATE_KEY }}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: linux/amd64
append: |
- endpoint: ssh://${{ secrets.DOCKER_SSH_USER }}@${{ secrets.DOCKER_SSH_HOST_AMD64 }}
platforms: linux/amd64

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
@@ -54,14 +65,84 @@ jobs:
uses: docker/build-push-action@v4
with:
build-args: |
NODE_VERSION=${{ matrix.NODE_VERSION }}
NODE_VERSION=${{ env.NODE_VERSION }}
BASE=${{ matrix.BASE }}
context: install/docker/
file: install/docker/Dockerfile.${{ matrix.FLAVOR }}
platforms: linux/amd64,linux/arm64
push: true
tags: |
koush/scrypted-common:${{ matrix.NODE_VERSION }}-${{ matrix.BASE }}-${{ matrix.FLAVOR }}
ghcr.io/koush/scrypted-common:${{ matrix.NODE_VERSION }}-${{ matrix.BASE }}-${{ matrix.FLAVOR }}
koush/scrypted-common:${{ matrix.BASE }}-${{ matrix.FLAVOR }}
ghcr.io/koush/scrypted-common:${{ matrix.BASE }}-${{ matrix.FLAVOR }}
cache-from: type=gha
cache-to: type=gha,mode=max

build-nvidia:
name: Push NVIDIA Docker image to Docker Hub
needs: build
runs-on: self-hosted
strategy:
matrix:
BASE: ["jammy"]
steps:
- name: Check out the repo
uses: actions/checkout@v3

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up SSH
uses: MrSquaare/ssh-setup-action@v2
with:
host: ${{ secrets.DOCKER_SSH_HOST_AMD64 }}
private-key: ${{ secrets.DOCKER_SSH_PRIVATE_KEY }}

- name: Set up SSH
uses: MrSquaare/ssh-setup-action@v2
with:
host: ${{ secrets.DOCKER_SSH_HOST_ARM64 }}
private-key: ${{ secrets.DOCKER_SSH_PRIVATE_KEY }}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: linux/amd64
append: |
- endpoint: ssh://${{ secrets.DOCKER_SSH_USER }}@${{ secrets.DOCKER_SSH_HOST_AMD64 }}
platforms: linux/amd64

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: linux/arm64
append: |
- endpoint: ssh://${{ secrets.DOCKER_SSH_USER }}@${{ secrets.DOCKER_SSH_HOST_ARM64 }}
platforms: linux/arm64

- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}

- name: Login to Github Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Build and push Docker image (scrypted-common)
uses: docker/build-push-action@v4
with:
build-args: |
BASE=ghcr.io/koush/scrypted-common:${{ matrix.BASE }}-full
context: install/docker/
file: install/docker/Dockerfile.nvidia
platforms: linux/amd64,linux/arm64
push: true
tags: |
koush/scrypted-common:${{ matrix.BASE }}-nvidia
ghcr.io/koush/scrypted-common:${{ matrix.BASE }}-nvidia
cache-from: type=gha
cache-to: type=gha,mode=max
```
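Read together, the hunks above wire each architecture to a self-hosted builder over SSH: an ssh-setup step installs the key for a host, and a setup-buildx-action step appends that host as an additional builder endpoint for its platform. The following is a condensed sketch of that pairing, using only the step names and secrets already referenced in the workflow; it is not the complete job definition.

```yaml
# Sketch only: one SSH key setup plus one Buildx endpoint append per architecture.
- name: Set up SSH
  uses: MrSquaare/ssh-setup-action@v2
  with:
    host: ${{ secrets.DOCKER_SSH_HOST_ARM64 }}
    private-key: ${{ secrets.DOCKER_SSH_PRIVATE_KEY }}

- name: Set up Docker Buildx
  uses: docker/setup-buildx-action@v2
  with:
    platforms: linux/arm64
    append: |
      - endpoint: ssh://${{ secrets.DOCKER_SSH_USER }}@${{ secrets.DOCKER_SSH_HOST_ARM64 }}
        platforms: linux/arm64
```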
.github/workflows/docker.yml (vendored, 44 lines changed)
```diff
@@ -20,10 +20,10 @@ jobs:
strategy:
matrix:
BASE: [
"20-jammy-full",
"20-jammy-lite",
["jammy-nvidia", ".s6"],
["jammy-full", ".s6"],
["jammy-lite", ""],
]
SUPERVISOR: ["", ".s6"]
steps:
- name: Check out the repo
uses: actions/checkout@v3
@@ -42,12 +42,26 @@ jobs:
- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up SSH
uses: MrSquaare/ssh-setup-action@v2
with:
host: ${{ secrets.DOCKER_SSH_HOST_AMD64 }}
private-key: ${{ secrets.DOCKER_SSH_PRIVATE_KEY }}

- name: Set up SSH
uses: MrSquaare/ssh-setup-action@v2
with:
host: ${{ secrets.DOCKER_SSH_HOST_ARM64 }}
private-key: ${{ secrets.DOCKER_SSH_PRIVATE_KEY }}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
platforms: linux/amd64
append: |
- endpoint: ssh://${{ secrets.DOCKER_SSH_USER }}@${{ secrets.DOCKER_SSH_HOST_AMD64 }}
platforms: linux/amd64

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
@@ -73,23 +87,23 @@ jobs:
uses: docker/build-push-action@v4
with:
build-args: |
BASE=${{ matrix.BASE }}
BASE=${{ matrix.BASE[0] }}
SCRYPTED_INSTALL_VERSION=${{ steps.package-version.outputs.NPM_VERSION }}
context: install/docker/
file: install/docker/Dockerfile${{ matrix.SUPERVISOR }}
file: install/docker/Dockerfile${{ matrix.BASE[1] }}
platforms: linux/amd64,linux/arm64
push: true
tags: |
${{ format('koush/scrypted:{0}{1}-v{2}', matrix.BASE, matrix.SUPERVISOR, github.event.inputs.publish_tag || steps.package-version.outputs.NPM_VERSION) }}
${{ matrix.BASE == '20-jammy-full' && matrix.SUPERVISOR == '.s6' && format('koush/scrypted:{0}', github.event.inputs.tag) || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE == '20-jammy-full' && matrix.SUPERVISOR == '' && 'koush/scrypted:full' || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE == '20-jammy-lite' && matrix.SUPERVISOR == '' && 'koush/scrypted:lite' || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE == '20-jammy-lite' && matrix.SUPERVISOR == '.s6' && 'koush/scrypted:lite-s6' || '' }}
${{ format('koush/scrypted:v{1}-{0}', matrix.BASE[0], github.event.inputs.publish_tag || steps.package-version.outputs.NPM_VERSION) }}
${{ matrix.BASE[0] == 'jammy-full' && format('koush/scrypted:{0}', github.event.inputs.tag) || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE[0] == 'jammy-nvidia' && 'koush/scrypted:nvidia' || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE[0] == 'jammy-full' && 'koush/scrypted:full' || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE[0] == 'jammy-lite' && 'koush/scrypted:lite' || '' }}

${{ format('ghcr.io/koush/scrypted:{0}{1}-v{2}', matrix.BASE, matrix.SUPERVISOR, github.event.inputs.publish_tag || steps.package-version.outputs.NPM_VERSION) }}
${{ matrix.BASE == '20-jammy-full' && matrix.SUPERVISOR == '.s6' && format('ghcr.io/koush/scrypted:{0}', github.event.inputs.tag) || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE == '20-jammy-full' && matrix.SUPERVISOR == '' && 'ghcr.io/koush/scrypted:full' || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE == '20-jammy-lite' && matrix.SUPERVISOR == '' && 'ghcr.io/koush/scrypted:lite' || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE == '20-jammy-lite' && matrix.SUPERVISOR == '.s6' && 'ghcr.io/koush/scrypted:lite-s6' || '' }}
${{ format('ghcr.io/koush/scrypted:v{1}-{0}', matrix.BASE[0], github.event.inputs.publish_tag || steps.package-version.outputs.NPM_VERSION) }}
${{ matrix.BASE[0] == 'jammy-full' && format('ghcr.io/koush/scrypted:{0}', github.event.inputs.tag) || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE[0] == 'jammy-nvidia' && 'ghcr.io/koush/scrypted:nvidia' || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE[0] == 'jammy-full' && 'ghcr.io/koush/scrypted:full' || '' }}
${{ github.event.inputs.tag == 'latest' && matrix.BASE[0] == 'jammy-lite' && 'ghcr.io/koush/scrypted:lite' || '' }}
cache-from: type=gha
cache-to: type=gha,mode=max
```
```diff
@@ -1,13 +1,12 @@
# Home Assistant Addon Configuration
name: Scrypted
version: "20-jammy-full.s6-v0.97.0"
version: "20-jammy-full.s6-v0.99.0"
slug: scrypted
description: Scrypted is a high performance home video integration and automation platform
url: "https://github.com/koush/scrypted"
arch:
- amd64
- aarch64
- armv7
init: false
ingress: true
ingress_port: 11080
```
```diff
@@ -1,14 +1,6 @@
FROM ghcr.io/koush/scrypted:20-jammy-full.s6
ARG BASE="ghcr.io/koush/scrypted-common:20-jammy-full"
FROM $BASE

WORKDIR /

# Install miniconda
ENV CONDA_DIR /opt/conda
RUN apt update -y && apt -y install wget && wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
/bin/bash ~/miniconda.sh -b -p /opt/conda
# Put conda in path so we can use conda activate
ENV PATH=$CONDA_DIR/bin:$PATH

RUN conda -y install -c conda-forge cudatoolkit cudnn
ENV CONDA_PREFIX=/opt/conda
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib/
# nvidia cudnn/libcublas etc.
# for some reason this is not provided by the nvidia container toolkit
RUN curl https://raw.githubusercontent.com/koush/scrypted/main/install/docker/install-nvidia-graphics.sh | bash
```
```diff
@@ -1,5 +1,3 @@
version: "3.5"

# The Scrypted docker-compose.yml file typically resides at:
# ~/.scrypted/docker-compose.yml

@@ -40,14 +38,21 @@ services:
# See volumes section below to use the host daemon.
# - SCRYPTED_DOCKER_AVAHI=true

# Uncomment next 3 lines for Nvidia GPU support.
# NVIDIA (Part 1 of 4)
# - NVIDIA_VISIBLE_DEVICES=all
# - NVIDIA_DRIVER_CAPABILITIES=all

# NVIDIA (Part 2 of 4)
# runtime: nvidia
# Necessary to communicate with host dbus for avahi-daemon.
security_opt:
- apparmor:unconfined

# NVIDIA (Part 3 of 4) - Use NVIDIA image, and remove subsequent default image.
# image: ghcr.io/koush/scrypted:nvidia
image: ghcr.io/koush/scrypted

volumes:
# NVIDIA (Part 4 of 4)
# - /etc/OpenCL/vendors/nvidia.icd:/etc/OpenCL/vendors/nvidia.icd

# Scrypted NVR Storage (Part 3 of 3)

# Modify to add the additional volume for Scrypted NVR.
@@ -94,15 +99,16 @@ services:
container_name: scrypted
restart: unless-stopped
network_mode: host
image: ghcr.io/koush/scrypted

# logging is noisy and will unnecessarily wear on flash storage.
# scrypted has per device in memory logging that is preferred.
# enable the log file if enhanced debugging is necessary.
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "10"
driver: "none"
# driver: "json-file"
# options:
# max-size: "10m"
# max-file: "10"
labels:
- "com.centurylinklabs.watchtower.scope=scrypted"
```
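For reference, the four commented "NVIDIA (Part n of 4)" markers in the compose file above combine into a service that looks roughly like the following sketch. It only collects lines that already appear (commented out) in the file and omits the rest of the service definition.

```yaml
services:
  scrypted:
    # Part 3: use the NVIDIA image instead of the default image.
    image: ghcr.io/koush/scrypted:nvidia
    # Part 2: enable the NVIDIA container runtime.
    runtime: nvidia
    environment:
      # Part 1: expose the GPUs and driver capabilities to the container.
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=all
    volumes:
      # Part 4: pass through the host OpenCL vendor file.
      - /etc/OpenCL/vendors/nvidia.icd:/etc/OpenCL/vendors/nvidia.icd
```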
install/docker/install-nvidia-graphics.sh (new file, 16 lines)
```bash
if [ "$(uname -m)" = "x86_64" ]
then
    echo "Installing NVIDIA graphics packages."
    apt update -q \
        && apt install -y wget \
        && wget -qO /cuda-keyring.deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/$(uname -m)/cuda-keyring_1.1-1_all.deb \
        && dpkg -i /cuda-keyring.deb \
        && apt update -q \
        && apt install -y cuda-nvcc-11-8 libcublas-11-8 libcudnn8 cuda-libraries-11-8 \
        && apt install -y cuda-nvcc-12-4 libcublas-12-4 libcudnn8 cuda-libraries-12-4;
    exit $?
else
    echo "NVIDIA graphics will not be installed on this architecture."
fi

exit 0
```
```diff
@@ -110,10 +110,12 @@ User=$SERVICE_USER
Group=$SERVICE_USER
Type=simple
ExecStart=/usr/bin/npx -y scrypted serve
Restart=on-failure
Restart=always
RestartSec=3
Environment="NODE_OPTIONS=$NODE_OPTIONS"
Environment="SCRYPTED_INSTALL_ENVIRONMENT=$SCRYPTED_INSTALL_ENVIRONMENT"
StandardOutput=null
StandardError=null

[Install]
WantedBy=multi-user.target
```
```diff
@@ -34,7 +34,8 @@ $SCRYPTED_HOME_ESCAPED_PATH = $SCRYPTED_HOME.replace('\', '\\')
npm install --prefix $SCRYPTED_HOME @koush/node-windows --save

$NPX_PATH = (Get-Command npx).Path
$NPX_PATH_ESCAPED = $NPX_PATH.replace('\', '\\')
# The path needs double quotes to handle spaces in the directory path
$NPX_PATH_ESCAPED = '"' + $NPX_PATH.replace('\', '\\') + '"'

$SERVICE_JS = @"
const fs = require('fs');
@@ -44,8 +45,10 @@ try {
catch (e) {
}
const child_process = require('child_process');
child_process.spawn('$($NPX_PATH_ESCAPED)', ['-y', 'scrypted', 'serve'], {
child_process.spawn('$NPX_PATH_ESCAPED', ['-y', 'scrypted', 'serve'], {
stdio: 'inherit',
// allow spawning .cmd https://nodejs.org/en/blog/vulnerability/april-2024-security-releases-2
shell: true,
});
"@
```
packages/cli/package-lock.json (generated, 4 lines changed)
```diff
@@ -1,12 +1,12 @@
{
"name": "scrypted",
"version": "1.3.14",
"version": "1.3.15",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "scrypted",
"version": "1.3.14",
"version": "1.3.15",
"license": "ISC",
"dependencies": {
"@scrypted/client": "^1.3.3",
```
```diff
@@ -1,6 +1,6 @@
{
"name": "scrypted",
"version": "1.3.14",
"version": "1.3.15",
"description": "",
"main": "./dist/packages/cli/src/main.js",
"bin": {
```
```diff
@@ -24,6 +24,8 @@ async function runCommand(command: string, ...args: string[]) {
// https://github.com/lovell/sharp/blob/eefaa998725cf345227d94b40615e090495c6d09/lib/libvips.js#L115C19-L115C46
SHARP_IGNORE_GLOBAL_LIBVIPS: 'true',
},
// allow spawning .cmd https://nodejs.org/en/blog/vulnerability/april-2024-security-releases-2
shell: os.platform() === 'win32' ? true : undefined,
});
await once(cp, 'exit');
if (cp.exitCode)
```
```diff
@@ -15,6 +15,8 @@ Environment="SCRYPTED_PYTHON39_PATH=/usr/bin/python3.9"
Environment="SCRYPTED_PYTHON310_PATH=/usr/bin/python3.10"
Environment="SCRYPTED_FFMPEG_PATH=/usr/bin/ffmpeg"
Environment="SCRYPTED_INSTALL_ENVIRONMENT=lxc"
StandardOutput=null
StandardError=null

[Install]
WantedBy=multi-user.target
```
plugins/core/package-lock.json (generated, 4 lines changed)
```diff
@@ -1,12 +1,12 @@
{
"name": "@scrypted/core",
"version": "0.3.23",
"version": "0.3.24",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/core",
"version": "0.3.23",
"version": "0.3.24",
"license": "Apache-2.0",
"dependencies": {
"@scrypted/common": "file:../../common",
```
```diff
@@ -1,6 +1,6 @@
{
"name": "@scrypted/core",
"version": "0.3.23",
"version": "0.3.24",
"description": "Scrypted Core plugin. Provides the UI, websocket, and engine.io APIs.",
"author": "Scrypted",
"license": "Apache-2.0",
```
plugins/coreml/package-lock.json (generated, 4 lines changed)
```diff
@@ -1,12 +1,12 @@
{
"name": "@scrypted/coreml",
"version": "0.1.45",
"version": "0.1.49",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/coreml",
"version": "0.1.45",
"version": "0.1.49",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}
```
```diff
@@ -42,5 +42,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.45"
"version": "0.1.49"
}
```
```diff
@@ -13,7 +13,12 @@ from PIL import Image
from scrypted_sdk import Setting, SettingValue

from common import yolo
from coreml.recognition import CoreMLRecognition
from coreml.face_recognition import CoreMLFaceRecognition

try:
from coreml.text_recognition import CoreMLTextRecognition
except:
CoreMLTextRecognition = None
from predict import Prediction, PredictPlugin
from predict.rectangle import Rectangle

@@ -131,25 +136,43 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProv

async def prepareRecognitionModels(self):
try:
devices = [
{
"nativeId": "facerecognition",
"type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
"interfaces": [
scrypted_sdk.ScryptedInterface.ObjectDetection.value,
],
"name": "CoreML Face Recognition",
},
]

if CoreMLTextRecognition:
devices.append(
{
"nativeId": "textrecognition",
"type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
"interfaces": [
scrypted_sdk.ScryptedInterface.ObjectDetection.value,
],
"name": "CoreML Text Recognition",
},
)

await scrypted_sdk.deviceManager.onDevicesChanged(
{
"devices": [
{
"nativeId": "recognition",
"type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
"interfaces": [
scrypted_sdk.ScryptedInterface.ObjectDetection.value,
],
"name": "CoreML Recognition",
}
]
"devices": devices,
}
)
except:
pass

async def getDevice(self, nativeId: str) -> Any:
return CoreMLRecognition(nativeId)
if nativeId == "facerecognition":
return CoreMLFaceRecognition(nativeId)
if nativeId == "textrecognition":
return CoreMLTextRecognition(nativeId)
raise Exception("unknown device")

async def getSettings(self) -> list[Setting]:
model = self.storage.getItem("model") or "Default"
@@ -174,7 +197,7 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProv

def get_input_size(self) -> Tuple[float, float]:
return (self.inputwidth, self.inputheight)


async def detect_batch(self, inputs: List[Any]) -> List[Any]:
out_dicts = await asyncio.get_event_loop().run_in_executor(
predictExecutor, lambda: self.model.predict(inputs)
```
```diff
@@ -9,7 +9,7 @@ import numpy as np
# from Foundation import NSData, NSMakeSize

# import Vision
from predict.recognize import RecognizeDetection
from predict.face_recognize import FaceRecognizeDetection


def euclidean_distance(arr1, arr2):
@@ -26,7 +26,7 @@ def cosine_similarity(vector_a, vector_b):

predictExecutor = concurrent.futures.ThreadPoolExecutor(8, "Vision-Predict")

class CoreMLRecognition(RecognizeDetection):
class CoreMLFaceRecognition(FaceRecognizeDetection):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
```
plugins/coreml/src/coreml/text_recognition.py (new file, 45 lines)
```python
from __future__ import annotations

import os

import coremltools as ct

from predict.text_recognize import TextRecognition


class CoreMLTextRecognition(TextRecognition):
    def __init__(self, nativeId: str | None = None):
        super().__init__(nativeId=nativeId)

    def downloadModel(self, model: str):
        model_version = "v7"
        mlmodel = "model"

        files = [
            f"{model}/{model}.mlpackage/Data/com.apple.CoreML/weights/weight.bin",
            f"{model}/{model}.mlpackage/Data/com.apple.CoreML/{mlmodel}.mlmodel",
            f"{model}/{model}.mlpackage/Manifest.json",
        ]

        for f in files:
            p = self.downloadFile(
                f"https://github.com/koush/coreml-models/raw/main/{f}",
                f"{model_version}/{f}",
            )
            modelFile = os.path.dirname(p)

        model = ct.models.MLModel(modelFile)
        inputName = model.get_spec().description.input[0].name
        return model, inputName

    def predictDetectModel(self, input):
        model, inputName = self.detectModel
        out_dict = model.predict({inputName: input})
        results = list(out_dict.values())[0]
        return results

    def predictTextModel(self, input):
        model, inputName = self.textModel
        out_dict = model.predict({inputName: input})
        preds = out_dict["linear_2"]
        return preds
```
```diff
@@ -0,0 +1 @@
opencv-python
```
```diff
@@ -1,2 +1,3 @@
# 2024-04-23 - modify timestamp to force pip reinstall
coremltools==7.1
Pillow>=5.4.1
```
```diff
@@ -1,3 +0,0 @@
# Dlib Face Recognition for Scrypted

This plugin adds face recognition capabilities to any camera in Scrypted.
```
Binary file not shown (before: 36 KiB).
@@ -1,252 +0,0 @@
|
||||
from __future__ import annotations
|
||||
import re
|
||||
import scrypted_sdk
|
||||
from typing import Any, Tuple
|
||||
from predict import PredictPlugin, Prediction, Rectangle
|
||||
import os
|
||||
from PIL import Image
|
||||
import face_recognition
|
||||
import numpy as np
|
||||
from typing import Any, List, Tuple, Mapping
|
||||
from scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected, Setting
|
||||
from predict import PredictSession
|
||||
import threading
|
||||
import asyncio
|
||||
import base64
|
||||
import json
|
||||
import random
|
||||
import string
|
||||
from scrypted_sdk import RequestPictureOptions, MediaObject, Setting
|
||||
import os
|
||||
import json
|
||||
|
||||
def random_string():
|
||||
letters = string.ascii_lowercase
|
||||
return ''.join(random.choice(letters) for i in range(10))
|
||||
|
||||
|
||||
MIME_TYPE = 'x-scrypted-dlib/x-raw-image'
|
||||
|
||||
class DlibPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings):
|
||||
def __init__(self, nativeId: str | None = None):
|
||||
super().__init__(MIME_TYPE, nativeId=nativeId)
|
||||
|
||||
self.labels = {
|
||||
0: 'face'
|
||||
}
|
||||
|
||||
self.mutex = threading.Lock()
|
||||
self.known_faces = {}
|
||||
self.encoded_faces = {}
|
||||
self.load_known_faces()
|
||||
|
||||
def save_known_faces(self):
|
||||
j = json.dumps(self.known_faces)
|
||||
self.storage.setItem('known', j)
|
||||
|
||||
def load_known_faces(self):
|
||||
self.known_faces = {}
|
||||
self.encoded_faces = {}
|
||||
|
||||
try:
|
||||
self.known_faces = json.loads(self.storage.getItem('known'))
|
||||
except:
|
||||
pass
|
||||
|
||||
for known in self.known_faces:
|
||||
encoded = []
|
||||
self.encoded_faces[known] = encoded
|
||||
encodings = self.known_faces[known]
|
||||
for str in encodings:
|
||||
try:
|
||||
parsed = base64.decodebytes(bytes(str, 'utf-8'))
|
||||
encoding = np.frombuffer(parsed, dtype=np.float64)
|
||||
encoded.append(encoding)
|
||||
except:
|
||||
pass
|
||||
|
||||
# width, height, channels
|
||||
def get_input_details(self) -> Tuple[int, int, int]:
|
||||
pass
|
||||
|
||||
def get_input_size(self) -> Tuple[float, float]:
|
||||
pass
|
||||
|
||||
def getTriggerClasses(self) -> list[str]:
|
||||
return ['person']
|
||||
|
||||
def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
|
||||
nparray = np.array(input.resize((int(input.width / 4), int(input.height / 4))))
|
||||
|
||||
with self.mutex:
|
||||
face_locations = face_recognition.face_locations(nparray)
|
||||
|
||||
for idx, face in enumerate(face_locations):
|
||||
t, r, b, l = face
|
||||
t *= 4
|
||||
r *= 4
|
||||
b *= 4
|
||||
l *= 4
|
||||
face_locations[idx] = (t, r, b, l)
|
||||
|
||||
nparray = np.array(input)
|
||||
|
||||
with self.mutex:
|
||||
face_encodings = face_recognition.face_encodings(nparray, face_locations)
|
||||
|
||||
all_ids = []
|
||||
all_faces = []
|
||||
for encoded in self.encoded_faces:
|
||||
all_ids += ([encoded] * len(self.encoded_faces[encoded]))
|
||||
all_faces += self.encoded_faces[encoded]
|
||||
|
||||
m = {}
|
||||
for idx, fe in enumerate(face_encodings):
|
||||
results = list(face_recognition.face_distance(all_faces, fe))
|
||||
|
||||
best = 1
|
||||
if len(results):
|
||||
best = min(results)
|
||||
minpos = results.index(best)
|
||||
|
||||
if best > .6:
|
||||
id = random_string() + '.jpg'
|
||||
print('top face %s' % best)
|
||||
print('new face %s' % id)
|
||||
encoded = [fe]
|
||||
self.encoded_faces[id] = encoded
|
||||
all_faces += encoded
|
||||
|
||||
volume = os.environ['SCRYPTED_PLUGIN_VOLUME']
|
||||
people = os.path.join(volume, 'unknown')
|
||||
os.makedirs(people, exist_ok=True)
|
||||
t, r, b, l = face_locations[idx]
|
||||
cropped = input.crop((l, t, r, b))
|
||||
fp = os.path.join(people, id)
|
||||
cropped.save(fp)
|
||||
else:
|
||||
id = all_ids[minpos]
|
||||
print('has face %s' % id)
|
||||
m[idx] = id
|
||||
|
||||
# return
|
||||
|
||||
objs = []
|
||||
|
||||
for face in face_locations:
|
||||
t, r, b, l = face
|
||||
obj = Prediction(0, 1, Rectangle(
|
||||
l,
|
||||
t,
|
||||
r,
|
||||
b
|
||||
))
|
||||
objs.append(obj)
|
||||
|
||||
ret = self.create_detection_result(objs, src_size, ['face'], cvss)
|
||||
|
||||
for idx, d in enumerate(ret['detections']):
|
||||
d['id'] = m.get(idx)
|
||||
d['name'] = m.get(idx)
|
||||
|
||||
return ret
|
||||
|
||||
def track(self, detection_session: PredictSession, ret: ObjectsDetected):
|
||||
pass
|
||||
|
||||
|
||||
async def takePicture(self, options: RequestPictureOptions = None) -> MediaObject:
|
||||
volume = os.environ['SCRYPTED_PLUGIN_VOLUME']
|
||||
people = os.path.join(volume, 'unknown')
|
||||
os.makedirs(people, exist_ok=True)
|
||||
for unknown in os.listdir(people):
|
||||
fp = os.path.join(people, unknown)
|
||||
ret = scrypted_sdk.mediaManager.createMediaObjectFromUrl('file:/' + fp)
|
||||
return await ret
|
||||
|
||||
black = os.path.join(volume, 'zip', 'unzipped', 'fs', 'black.jpg')
|
||||
ret = scrypted_sdk.mediaManager.createMediaObjectFromUrl('file:/' + black)
|
||||
return await ret
|
||||
|
||||
async def getSettings(self) -> list[Setting]:
|
||||
ret = []
|
||||
|
||||
volume = os.environ['SCRYPTED_PLUGIN_VOLUME']
|
||||
people = os.path.join(volume, 'unknown')
|
||||
os.makedirs(people, exist_ok=True)
|
||||
|
||||
choices = list(self.known_faces.keys())
|
||||
|
||||
for unknown in os.listdir(people):
|
||||
ret.append(
|
||||
{
|
||||
'key': unknown,
|
||||
'title': 'Name',
|
||||
'description': 'Associate this thumbnail with an existing person or identify a new person.',
|
||||
'choices': choices,
|
||||
'combobox': True,
|
||||
}
|
||||
)
|
||||
ret.append(
|
||||
{
|
||||
'key': 'delete',
|
||||
'title': 'Delete',
|
||||
'description': 'Delete this face.',
|
||||
'type': 'button',
|
||||
}
|
||||
)
|
||||
break
|
||||
|
||||
if not len(ret):
|
||||
ret.append(
|
||||
{
|
||||
'key': 'unknown',
|
||||
'title': 'Unknown People',
|
||||
'value': 'Waiting for unknown person...',
|
||||
'description': 'There are no more people that need to be identified.',
|
||||
'readonly': True,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
ret.append(
|
||||
{
|
||||
'key': 'known',
|
||||
'group': 'People',
|
||||
'title': 'Familiar People',
|
||||
'description': 'The people known to this plugin.',
|
||||
'choices': choices,
|
||||
'multiple': True,
|
||||
'value': choices,
|
||||
}
|
||||
)
|
||||
|
||||
return ret
|
||||
|
||||
async def putSetting(self, key: str, value: str) -> None:
|
||||
if key == 'known':
|
||||
n = {}
|
||||
for k in value:
|
||||
n[k] = self.known_faces[k]
|
||||
self.known_faces = n
|
||||
self.save_known_faces()
|
||||
elif value or key == 'delete':
|
||||
volume = os.environ['SCRYPTED_PLUGIN_VOLUME']
|
||||
people = os.path.join(volume, 'unknown')
|
||||
os.makedirs(people, exist_ok=True)
|
||||
for unknown in os.listdir(people):
|
||||
fp = os.path.join(people, unknown)
|
||||
os.remove(fp)
|
||||
if key != 'delete':
|
||||
encoded = self.encoded_faces[key]
|
||||
strs = []
|
||||
for e in encoded:
|
||||
strs.append(base64.encodebytes(e.tobytes()).decode())
|
||||
if not self.known_faces.get(value):
|
||||
self.known_faces[value] = []
|
||||
self.known_faces[value] += strs
|
||||
self.save_known_faces()
|
||||
break
|
||||
|
||||
await self.onDeviceEvent(scrypted_sdk.ScryptedInterface.Settings.value, None)
|
||||
await self.onDeviceEvent(scrypted_sdk.ScryptedInterface.Camera.value, None)
|
||||
@@ -1,4 +0,0 @@
|
||||
from dlibplugin import DlibPlugin
|
||||
|
||||
def create_scrypted_plugin():
|
||||
return DlibPlugin()
|
||||
@@ -1 +0,0 @@
|
||||
../../tensorflow-lite/src/pipeline
|
||||
@@ -1,10 +0,0 @@
|
||||
# plugin
|
||||
Pillow>=5.4.1
|
||||
PyGObject>=3.30.4; sys_platform != 'win32'
|
||||
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
|
||||
face-recognition
|
||||
|
||||
# sort_oh
|
||||
scipy
|
||||
filterpy
|
||||
numpy
|
||||
```diff
@@ -214,7 +214,7 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
continue;
if (levenshteinDistance(label, d.label) <= labelDistance)
return true;
this.console.log('No label does not match.', label, d.label);
this.console.log('Label does not match.', label, d.label);
}

return false;
```
```diff
@@ -9,6 +9,7 @@ dist/*.js
dist/*.txt
__pycache__
all_models
.venv
sort_oh
download_models.sh
tsconfig.json
.venv
```
```diff
@@ -1,16 +1,20 @@

{
// docker installation
// "scrypted.debugHost": "koushik-thin",
// "scrypted.debugHost": "koushik-ubuntuvm",
// "scrypted.serverRoot": "/server",

"scrypted.debugHost": "koushik-ubuntuvm",
"scrypted.serverRoot": "/home/koush/.scrypted",

// pi local installation
// "scrypted.debugHost": "192.168.2.119",
// "scrypted.serverRoot": "/home/pi/.scrypted",

// local checkout
"scrypted.debugHost": "127.0.0.1",
"scrypted.serverRoot": "/Users/koush/.scrypted",
// "scrypted.debugHost": "127.0.0.1",
// "scrypted.serverRoot": "/Users/koush/.scrypted",
// "scrypted.debugHost": "koushik-winvm",
// "scrypted.serverRoot": "C:\\Users\\koush\\.scrypted",

"scrypted.pythonRemoteRoot": "${config:scrypted.serverRoot}/volume/plugin.zip",
"python.analysis.extraPaths": [
```
plugins/onnx/README.md (new file, 6 lines)
```
# ONNX Object Detection for Scrypted

This plugin adds object detection capabilities to any camera in Scrypted. Having a fast GPU and CPU is highly recommended.

The ONNX Plugin should only be used if you are a Scrypted NVR user. It will provide no
benefits to HomeKit, which does its own detection processing.
```
@@ -1,47 +1,48 @@
|
||||
{
|
||||
"name": "@scrypted/tensorflow-lite",
|
||||
"version": "0.1.18",
|
||||
"name": "@scrypted/openvino",
|
||||
"version": "0.1.81",
|
||||
"lockfileVersion": 2,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@scrypted/tensorflow-lite",
|
||||
"version": "0.1.18",
|
||||
"name": "@scrypted/openvino",
|
||||
"version": "0.1.81",
|
||||
"devDependencies": {
|
||||
"@scrypted/sdk": "file:../../sdk"
|
||||
}
|
||||
},
|
||||
"../../sdk": {
|
||||
"name": "@scrypted/sdk",
|
||||
"version": "0.2.39",
|
||||
"version": "0.3.29",
|
||||
"dev": true,
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@babel/preset-typescript": "^7.16.7",
|
||||
"@babel/preset-typescript": "^7.18.6",
|
||||
"adm-zip": "^0.4.13",
|
||||
"axios": "^0.21.4",
|
||||
"babel-loader": "^8.2.3",
|
||||
"axios": "^1.6.5",
|
||||
"babel-loader": "^9.1.0",
|
||||
"babel-plugin-const-enum": "^1.1.0",
|
||||
"esbuild": "^0.15.9",
|
||||
"ncp": "^2.0.0",
|
||||
"raw-loader": "^4.0.2",
|
||||
"rimraf": "^3.0.2",
|
||||
"tmp": "^0.2.1",
|
||||
"typescript": "^4.9.3",
|
||||
"webpack": "^5.74.0",
|
||||
"ts-loader": "^9.4.2",
|
||||
"typescript": "^4.9.4",
|
||||
"webpack": "^5.75.0",
|
||||
"webpack-bundle-analyzer": "^4.5.0"
|
||||
},
|
||||
"bin": {
|
||||
"scrypted-changelog": "bin/scrypted-changelog.js",
|
||||
"scrypted-debug": "bin/scrypted-debug.js",
|
||||
"scrypted-deploy": "bin/scrypted-deploy.js",
|
||||
"scrypted-deploy-debug": "bin/scrypted-deploy-debug.js",
|
||||
"scrypted-package-json": "bin/scrypted-package-json.js",
|
||||
"scrypted-readme": "bin/scrypted-readme.js",
|
||||
"scrypted-setup-project": "bin/scrypted-setup-project.js",
|
||||
"scrypted-webpack": "bin/scrypted-webpack.js"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^18.11.9",
|
||||
"@types/node": "^18.11.18",
|
||||
"@types/stringify-object": "^4.0.0",
|
||||
"stringify-object": "^3.3.0",
|
||||
"ts-node": "^10.4.0",
|
||||
@@ -60,12 +61,12 @@
|
||||
"@scrypted/sdk": {
|
||||
"version": "file:../../sdk",
|
||||
"requires": {
|
||||
"@babel/preset-typescript": "^7.16.7",
|
||||
"@types/node": "^18.11.9",
|
||||
"@babel/preset-typescript": "^7.18.6",
|
||||
"@types/node": "^18.11.18",
|
||||
"@types/stringify-object": "^4.0.0",
|
||||
"adm-zip": "^0.4.13",
|
||||
"axios": "^0.21.4",
|
||||
"babel-loader": "^8.2.3",
|
||||
"axios": "^1.6.5",
|
||||
"babel-loader": "^9.1.0",
|
||||
"babel-plugin-const-enum": "^1.1.0",
|
||||
"esbuild": "^0.15.9",
|
||||
"ncp": "^2.0.0",
|
||||
@@ -73,10 +74,11 @@
|
||||
"rimraf": "^3.0.2",
|
||||
"stringify-object": "^3.3.0",
|
||||
"tmp": "^0.2.1",
|
||||
"ts-loader": "^9.4.2",
|
||||
"ts-node": "^10.4.0",
|
||||
"typedoc": "^0.23.21",
|
||||
"typescript": "^4.9.3",
|
||||
"webpack": "^5.74.0",
|
||||
"typescript": "^4.9.4",
|
||||
"webpack": "^5.75.0",
|
||||
"webpack-bundle-analyzer": "^4.5.0"
|
||||
}
|
||||
}
|
||||
```diff
@@ -1,11 +1,11 @@
{
"name": "@scrypted/tensorflow",
"description": "Scrypted TensorFlow Object Detection",
"name": "@scrypted/onnx",
"description": "Scrypted ONNX Object Detection",
"keywords": [
"scrypted",
"plugin",
"coreml",
"neural",
"onnx",
"motion",
"object",
"detect",
"detection",
@@ -26,10 +26,9 @@
"scrypted-package-json": "scrypted-package-json"
},
"scrypted": {
"name": "TensorFlow Object Detection",
"name": "ONNX Object Detection",
"pluginDependencies": [
"@scrypted/objectdetector",
"@scrypted/python-codecs"
"@scrypted/objectdetector"
],
"runtime": "python",
"type": "API",
@@ -42,5 +41,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.18"
"version": "0.1.81"
}
```
plugins/onnx/src/common (new symbolic link)
```
../../openvino/src/common
```
plugins/onnx/src/main.py (new file, 4 lines)
```python
from ort import ONNXPlugin

def create_scrypted_plugin():
    return ONNXPlugin()
```
144
plugins/onnx/src/ort/__init__.py
Normal file
144
plugins/onnx/src/ort/__init__.py
Normal file
@@ -0,0 +1,144 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from typing import Any, Tuple
|
||||
|
||||
import sys
|
||||
import platform
|
||||
import numpy as np
|
||||
import onnxruntime
|
||||
import scrypted_sdk
|
||||
from PIL import Image
|
||||
import ast
|
||||
from scrypted_sdk.other import SettingValue
|
||||
from scrypted_sdk.types import Setting
|
||||
import concurrent.futures
|
||||
|
||||
import common.yolo as yolo
|
||||
from predict import PredictPlugin
|
||||
|
||||
predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "ONNX-Predict")
|
||||
|
||||
availableModels = [
|
||||
"Default",
|
||||
"scrypted_yolov6n_320",
|
||||
"scrypted_yolov6n",
|
||||
"scrypted_yolov6s_320",
|
||||
"scrypted_yolov6s",
|
||||
"scrypted_yolov9c_320",
|
||||
"scrypted_yolov9c",
|
||||
"scrypted_yolov8n_320",
|
||||
"scrypted_yolov8n",
|
||||
]
|
||||
|
||||
|
||||
def parse_labels(names):
|
||||
j = ast.literal_eval(names)
|
||||
ret = {}
|
||||
for k, v in j.items():
|
||||
ret[int(k)] = v
|
||||
return ret
|
||||
|
||||
class ONNXPlugin(
|
||||
PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings, scrypted_sdk.DeviceProvider
|
||||
):
|
||||
def __init__(self, nativeId: str | None = None):
|
||||
super().__init__(nativeId=nativeId)
|
||||
|
||||
model = self.storage.getItem("model") or "Default"
|
||||
if model == "Default" or model not in availableModels:
|
||||
if model != "Default":
|
||||
self.storage.setItem("model", "Default")
|
||||
model = "scrypted_yolov8n_320"
|
||||
self.yolo = "yolo" in model
|
||||
self.scrypted_yolo = "scrypted_yolo" in model
|
||||
self.scrypted_model = "scrypted" in model
|
||||
|
||||
print(f"model {model}")
|
||||
|
||||
onnxmodel = "best" if self.scrypted_model else model
|
||||
|
||||
model_version = "v2"
|
||||
onnxfile = self.downloadFile(
|
||||
f"https://raw.githubusercontent.com/koush/onnx-models/main/{model}/{onnxmodel}.onnx",
|
||||
f"{model_version}/{model}/{onnxmodel}.onnx",
|
||||
)
|
||||
|
||||
print(onnxfile)
|
||||
|
||||
try:
|
||||
sess_options = onnxruntime.SessionOptions()
|
||||
|
||||
providers: list[str] = []
|
||||
if sys.platform == 'darwin':
|
||||
providers.append("CoreMLExecutionProvider")
|
||||
|
||||
if 'linux' in sys.platform and platform.machine() == 'x86_64':
|
||||
providers.append("CUDAExecutionProvider")
|
||||
|
||||
providers.append('CPUExecutionProvider')
|
||||
|
||||
self.compiled_model = onnxruntime.InferenceSession(onnxfile, sess_options=sess_options, providers=providers)
|
||||
except:
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
print("Reverting all settings.")
|
||||
self.storage.removeItem("model")
|
||||
self.requestRestart()
|
||||
|
||||
input = self.compiled_model.get_inputs()[0]
|
||||
self.model_dim = input.shape[2]
|
||||
self.input_name = input.name
|
||||
self.labels = parse_labels(self.compiled_model.get_modelmeta().custom_metadata_map['names'])
|
||||
|
||||
async def getSettings(self) -> list[Setting]:
|
||||
model = self.storage.getItem("model") or "Default"
|
||||
return [
|
||||
{
|
||||
"key": "model",
|
||||
"title": "Model",
|
||||
"description": "The detection model used to find objects.",
|
||||
"choices": availableModels,
|
||||
"value": model,
|
||||
},
|
||||
]
|
||||
|
||||
async def putSetting(self, key: str, value: SettingValue):
|
||||
self.storage.setItem(key, value)
|
||||
await self.onDeviceEvent(scrypted_sdk.ScryptedInterface.Settings.value, None)
|
||||
self.requestRestart()
|
||||
|
||||
# width, height, channels
|
||||
def get_input_details(self) -> Tuple[int, int, int]:
|
||||
return [self.model_dim, self.model_dim, 3]
|
||||
|
||||
def get_input_size(self) -> Tuple[int, int]:
|
||||
return [self.model_dim, self.model_dim]
|
||||
|
||||
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
|
||||
def predict(input_tensor):
|
||||
output_tensors = self.compiled_model.run(None, { self.input_name: input_tensor })
|
||||
objs = yolo.parse_yolov9(output_tensors[0][0])
|
||||
return objs
|
||||
|
||||
im = np.array(input)
|
||||
im = np.stack([input])
|
||||
im = im.transpose((0, 3, 1, 2)) # BHWC to BCHW, (n, 3, h, w)
|
||||
im = im.astype(np.float32) / 255.0
|
||||
im = np.ascontiguousarray(im) # contiguous
|
||||
input_tensor = im
|
||||
|
||||
try:
|
||||
objs = await asyncio.get_event_loop().run_in_executor(
|
||||
predictExecutor, lambda: predict(input_tensor)
|
||||
)
|
||||
|
||||
except:
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
raise
|
||||
|
||||
ret = self.create_detection_result(objs, src_size, cvss)
|
||||
return ret
|
||||
plugins/onnx/src/requirements.txt (new file, 12 lines)
```
# uncomment to require cuda 12, but most stuff is still targetting cuda 11.
# however, stuff targetted for cuda 11 can still run on cuda 12.
# --extra-index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/onnxruntime-cuda-12/pypi/simple/
onnxruntime-gpu; 'linux' in sys_platform and platform_machine == 'x86_64'
# cpu and coreml execution provider
onnxruntime; 'linux' not in sys_platform or platform_machine != 'x86_64'
# ort-nightly-gpu==1.17.3.dev20240409002

# pillow-simd is available on x64 linux
# pillow-simd confirmed not building with arm64 linux or apple silicon
Pillow>=5.4.1; 'linux' not in sys_platform or platform_machine != 'x86_64'
pillow-simd; 'linux' in sys_platform and platform_machine == 'x86_64'
```
```diff
@@ -1,9 +1,13 @@
# plugin
numpy>=1.16.2
# pillow for anything not intel linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'

imutils>=0.5.0
# opencv-python is not available on armhf

# locked to version because 4.8.0.76 is broken.
opencv-python==4.8.0.74; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
# todo: check newer versions.
opencv-python==4.8.0.74

# pillow-simd is available on x64 linux
# pillow-simd confirmed not building with arm64 linux or apple silicon
Pillow>=5.4.1; 'linux' not in sys_platform or platform_machine != 'x86_64'
pillow-simd; 'linux' in sys_platform and platform_machine == 'x86_64'
```
10
plugins/openvino/package-lock.json
generated
10
plugins/openvino/package-lock.json
generated
@@ -1,25 +1,25 @@
|
||||
{
|
||||
"name": "@scrypted/openvino",
|
||||
"version": "0.1.77",
|
||||
"version": "0.1.80",
|
||||
"lockfileVersion": 2,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@scrypted/openvino",
|
||||
"version": "0.1.77",
|
||||
"version": "0.1.80",
|
||||
"devDependencies": {
|
||||
"@scrypted/sdk": "file:../../sdk"
|
||||
}
|
||||
},
|
||||
"../../sdk": {
|
||||
"name": "@scrypted/sdk",
|
||||
"version": "0.2.97",
|
||||
"version": "0.3.29",
|
||||
"dev": true,
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@babel/preset-typescript": "^7.18.6",
|
||||
"adm-zip": "^0.4.13",
|
||||
"axios": "^0.21.4",
|
||||
"axios": "^1.6.5",
|
||||
"babel-loader": "^9.1.0",
|
||||
"babel-plugin-const-enum": "^1.1.0",
|
||||
"esbuild": "^0.15.9",
|
||||
@@ -65,7 +65,7 @@
|
||||
"@types/node": "^18.11.18",
|
||||
"@types/stringify-object": "^4.0.0",
|
||||
"adm-zip": "^0.4.13",
|
||||
"axios": "^0.21.4",
|
||||
"axios": "^1.6.5",
|
||||
"babel-loader": "^9.1.0",
|
||||
"babel-plugin-const-enum": "^1.1.0",
|
||||
"esbuild": "^0.15.9",
|
||||
|
||||
@@ -42,5 +42,5 @@
|
||||
"devDependencies": {
|
||||
"@scrypted/sdk": "file:../../sdk"
|
||||
},
|
||||
"version": "0.1.77"
|
||||
"version": "0.1.80"
|
||||
}
|
||||
|
||||
@@ -8,12 +8,26 @@ from common.softmax import softmax
|
||||
from common.colors import ensureRGBData
|
||||
import math
|
||||
|
||||
async def crop_text(d: ObjectDetectionResult, image: scrypted_sdk.Image, width: int, height: int):
|
||||
def skew_image(image: Image, skew_angle_rad: float):
|
||||
skew_matrix = [1, 0, 0, skew_angle_rad, 1, 0]
|
||||
|
||||
# Apply the transformation
|
||||
skewed_image = image.transform(
|
||||
image.size, Image.AFFINE, skew_matrix, resample=Image.BICUBIC
|
||||
)
|
||||
|
||||
return skewed_image
|
||||
|
||||
async def crop_text(d: ObjectDetectionResult, image: scrypted_sdk.Image):
|
||||
l, t, w, h = d["boundingBox"]
|
||||
l = math.floor(l)
|
||||
t = math.floor(t)
|
||||
l = max(0, math.floor(l))
|
||||
t = max(0, math.floor(t))
|
||||
w = math.floor(w)
|
||||
h = math.floor(h)
|
||||
if l + w > image.width:
|
||||
w = image.width - l
|
||||
if t + h > image.height:
|
||||
h = image.height - t
|
||||
format = image.format or 'rgb'
|
||||
cropped = await image.toBuffer(
|
||||
{
|
||||
@@ -27,14 +41,30 @@ async def crop_text(d: ObjectDetectionResult, image: scrypted_sdk.Image, width:
|
||||
}
|
||||
)
|
||||
pilImage = await ensureRGBData(cropped, (w, h), format)
|
||||
resized = pilImage.resize((width, height), resample=Image.LANCZOS).convert("L")
|
||||
pilImage.close()
|
||||
return resized
|
||||
return pilImage
|
||||
|
||||
def calculate_y_change(original_height, skew_angle_radians):
|
||||
# Calculate the change in y-position
|
||||
y_change = original_height * math.tan(skew_angle_radians)
|
||||
|
||||
return y_change
|
||||
|
||||
async def prepare_text_result(d: ObjectDetectionResult, image: scrypted_sdk.Image, skew_angle: float):
|
||||
textImage = await crop_text(d, image)
|
||||
|
||||
skew_height_change = calculate_y_change(d["boundingBox"][3], skew_angle)
|
||||
skew_height_change = math.floor(skew_height_change)
|
||||
textImage = skew_image(textImage, skew_angle)
|
||||
# crop skew_height_change from top
|
||||
if skew_height_change > 0:
|
||||
textImage = textImage.crop((0, 0, textImage.width, textImage.height - skew_height_change))
|
||||
elif skew_height_change < 0:
|
||||
textImage = textImage.crop((0, -skew_height_change, textImage.width, textImage.height))
|
||||
|
||||
async def prepare_text_result(d: ObjectDetectionResult, image: scrypted_sdk.Image):
|
||||
new_height = 64
|
||||
new_width = int(d["boundingBox"][2] * new_height / d["boundingBox"][3])
|
||||
textImage = await crop_text(d, image, new_width, new_height)
|
||||
new_width = int(textImage.width * new_height / textImage.height)
|
||||
textImage = textImage.resize((new_width, new_height), resample=Image.LANCZOS).convert("L")
|
||||
|
||||
new_width = 256
|
||||
# calculate padding dimensions
|
||||
padding = (0, 0, new_width - textImage.width, 0)
|
||||
@@ -50,7 +80,6 @@ async def prepare_text_result(d: ObjectDetectionResult, image: scrypted_sdk.Imag
|
||||
# test normalize contrast
|
||||
# image_tensor = (image_tensor - np.min(image_tensor)) / (np.max(image_tensor) - np.min(image_tensor))
|
||||
|
||||
|
||||
image_tensor = (image_tensor - 0.5) / 0.5
|
||||
|
||||
image_tensor = np.expand_dims(image_tensor, axis=0)
|
||||
|
||||
@@ -17,7 +17,11 @@ import common.yolo as yolo
|
||||
from predict import Prediction, PredictPlugin
|
||||
from predict.rectangle import Rectangle
|
||||
|
||||
from .recognition import OpenVINORecognition
|
||||
from .face_recognition import OpenVINOFaceRecognition
|
||||
try:
|
||||
from .text_recognition import OpenVINOTextRecognition
|
||||
except:
|
||||
OpenVINOTextRecognition = None
|
||||
|
||||
predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "OpenVINO-Predict")
|
||||
|
||||
@@ -92,6 +96,22 @@ class OpenVINOPlugin(
|
||||
mode = self.storage.getItem("mode")
|
||||
if mode == "Default":
|
||||
mode = "AUTO"
|
||||
|
||||
dgpus = []
|
||||
# search for NVIDIA dGPU, as that is not preferred by AUTO for some reason?
|
||||
# todo: create separate core per NVIDIA dGPU as inference does not seem to
|
||||
# be distributed to multiple dGPU.
|
||||
for device in self.available_devices:
|
||||
try:
|
||||
full_device_name = self.core.get_property(device, "FULL_DEVICE_NAME")
|
||||
if "NVIDIA" in full_device_name and "dGPU" in full_device_name:
|
||||
dgpus.append(device)
|
||||
except:
|
||||
pass
|
||||
|
||||
if len(dgpus):
|
||||
mode = f"AUTO:{','.join(dgpus)},CPU"
|
||||
|
||||
mode = mode or "AUTO"
|
||||
self.mode = mode
|
||||
|
||||
@@ -326,22 +346,40 @@ class OpenVINOPlugin(
|
||||
|
||||
async def prepareRecognitionModels(self):
|
||||
try:
|
||||
devices = [
|
||||
{
|
||||
"nativeId": "facerecognition",
|
||||
"type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
|
||||
"interfaces": [
|
||||
scrypted_sdk.ScryptedInterface.ObjectDetection.value,
|
||||
],
|
||||
"name": "OpenVINO Face Recognition",
|
||||
},
|
||||
]
|
||||
|
||||
if OpenVINOTextRecognition:
|
||||
devices.append(
|
||||
{
|
||||
"nativeId": "textrecognition",
|
||||
"type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
|
||||
"interfaces": [
|
||||
scrypted_sdk.ScryptedInterface.ObjectDetection.value,
|
||||
],
|
||||
"name": "OpenVINO Text Recognition",
|
||||
},
|
||||
)
|
||||
|
||||
await scrypted_sdk.deviceManager.onDevicesChanged(
|
||||
{
|
||||
"devices": [
|
||||
{
|
||||
"nativeId": "recognition",
|
||||
"type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
|
||||
"interfaces": [
|
||||
scrypted_sdk.ScryptedInterface.ObjectDetection.value,
|
||||
],
|
||||
"name": "OpenVINO Recognition",
|
||||
}
|
||||
]
|
||||
"devices": devices,
|
||||
}
|
||||
)
|
||||
except:
|
||||
pass
|
||||
|
||||
async def getDevice(self, nativeId: str) -> Any:
|
||||
return OpenVINORecognition(self, nativeId)
|
||||
if nativeId == "facerecognition":
|
||||
return OpenVINOFaceRecognition(self, nativeId)
|
||||
elif nativeId == "textrecognition":
|
||||
return OpenVINOTextRecognition(self, nativeId)
|
||||
raise Exception("unknown device")
|
||||
|
||||
@@ -5,7 +5,7 @@ import openvino.runtime as ov
|
||||
|
||||
import numpy as np
|
||||
|
||||
from predict.recognize import RecognizeDetection
|
||||
from predict.face_recognize import FaceRecognizeDetection
|
||||
|
||||
|
||||
def euclidean_distance(arr1, arr2):
|
||||
@@ -19,7 +19,7 @@ def cosine_similarity(vector_a, vector_b):
|
||||
similarity = dot_product / (norm_a * norm_b)
|
||||
return similarity
|
||||
|
||||
class OpenVINORecognition(RecognizeDetection):
|
||||
class OpenVINOFaceRecognition(FaceRecognizeDetection):
|
||||
def __init__(self, plugin, nativeId: str | None = None):
|
||||
self.plugin = plugin
|
||||
|
||||
46
plugins/openvino/src/ov/text_recognition.py
Normal file
46
plugins/openvino/src/ov/text_recognition.py
Normal file
@@ -0,0 +1,46 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import openvino.runtime as ov
|
||||
import numpy as np
|
||||
|
||||
from predict.text_recognize import TextRecognition
|
||||
|
||||
|
||||
class OpenVINOTextRecognition(TextRecognition):
|
||||
def __init__(self, plugin, nativeId: str | None = None):
|
||||
self.plugin = plugin
|
||||
|
||||
super().__init__(nativeId=nativeId)
|
||||
|
||||
def downloadModel(self, model: str):
|
||||
ovmodel = "best"
|
||||
precision = self.plugin.precision
|
||||
model_version = "v5"
|
||||
xmlFile = self.downloadFile(
|
||||
f"https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{ovmodel}.xml",
|
||||
f"{model_version}/{model}/{precision}/{ovmodel}.xml",
|
||||
)
|
||||
binFile = self.downloadFile(
|
||||
f"https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{ovmodel}.bin",
|
||||
f"{model_version}/{model}/{precision}/{ovmodel}.bin",
|
||||
)
|
||||
print(xmlFile, binFile)
|
||||
return self.plugin.core.compile_model(xmlFile, self.plugin.mode)
|
||||
|
||||
def predictDetectModel(self, input):
|
||||
infer_request = self.detectModel.create_infer_request()
|
||||
im = ov.Tensor(array=input)
|
||||
input_tensor = im
|
||||
infer_request.set_input_tensor(input_tensor)
|
||||
infer_request.start_async()
|
||||
infer_request.wait()
|
||||
return infer_request.output_tensors[0].data
|
||||
|
||||
def predictTextModel(self, input):
|
||||
input = input.astype(np.float32)
|
||||
im = ov.Tensor(array=input)
|
||||
infer_request = self.textModel.create_infer_request()
|
||||
infer_request.set_input_tensor(im)
|
||||
infer_request.start_async()
|
||||
infer_request.wait()
|
||||
return infer_request.output_tensors[0].data
|
||||
plugins/openvino/src/requirements.optional.txt (new file, 1 line)
```
opencv-python
```
```diff
@@ -1,6 +1,7 @@
# 2024-04-23 - modify timestamp to force pip reinstall
openvino==2024.0.0

# pillow-simd is available on x64 linux
# pillow-simd confirmed not building with arm64 linux or apple silicon
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
Pillow>=5.4.1; 'linux' not in sys_platform or platform_machine != 'x86_64'
pillow-simd; 'linux' in sys_platform and platform_machine == 'x86_64'
```
plugins/pam-diff/.gitignore (vendored, 4 lines changed)
```diff
@@ -1,4 +0,0 @@
.DS_Store
out/
node_modules/
dist/
```
```diff
@@ -1,8 +0,0 @@
.DS_Store
out/
node_modules/
*.map
fs
src
.vscode
dist/*.js
```
22
plugins/pam-diff/.vscode/launch.json
vendored
22
plugins/pam-diff/.vscode/launch.json
vendored
@@ -1,22 +0,0 @@
|
||||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Scrypted Debugger",
|
||||
"address": "${config:scrypted.debugHost}",
|
||||
"port": 10081,
|
||||
"request": "attach",
|
||||
"skipFiles": [
|
||||
"<node_internals>/**"
|
||||
],
|
||||
"preLaunchTask": "scrypted: deploy+debug",
|
||||
"sourceMaps": true,
|
||||
"localRoot": "${workspaceFolder}/out",
|
||||
"remoteRoot": "/plugin/",
|
||||
"type": "pwa-node"
|
||||
}
|
||||
]
|
||||
}
|
||||
plugins/pam-diff/.vscode/settings.json (vendored, 4 lines changed)
```diff
@@ -1,4 +0,0 @@

{
"scrypted.debugHost": "koushik-ubuntu",
}
```
@@ -1,12 +0,0 @@
|
||||
# Motion Detection Plugin for Scrypted
|
||||
|
||||
The PAM Diff Motion Detection Plugin adds motion detection to any camera. It can also be used with cameras that have built-in motion detection.
|
||||
|
||||
Motion Detection should only be used if your camera does not have a plugin and does not provide motion
|
||||
events via email or webhooks.
|
||||
|
||||
|
||||
## Setup
|
||||
|
||||
1. Enable the integration on a camera.
|
||||
2. Configure the motion percent and difference to change the sensitivity.
|
||||
201
plugins/pam-diff/package-lock.json
generated
@@ -1,201 +0,0 @@
|
||||
{
|
||||
"name": "@scrypted/pam-diff",
|
||||
"version": "0.0.24",
|
||||
"lockfileVersion": 2,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@scrypted/pam-diff",
|
||||
"version": "0.0.24",
|
||||
"dependencies": {
|
||||
"@types/node": "^16.6.1",
|
||||
"pipe2pam": "^0.6.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@scrypted/sdk": "file:../../sdk"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"pam-diff": "^1.1.0"
|
||||
}
|
||||
},
|
||||
"../../sdk": {
|
||||
"name": "@scrypted/sdk",
|
||||
"version": "0.2.101",
|
||||
"dev": true,
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@babel/preset-typescript": "^7.18.6",
|
||||
"adm-zip": "^0.4.13",
|
||||
"axios": "^0.21.4",
|
||||
"babel-loader": "^9.1.0",
|
||||
"babel-plugin-const-enum": "^1.1.0",
|
||||
"esbuild": "^0.15.9",
|
||||
"ncp": "^2.0.0",
|
||||
"raw-loader": "^4.0.2",
|
||||
"rimraf": "^3.0.2",
|
||||
"tmp": "^0.2.1",
|
||||
"ts-loader": "^9.4.2",
|
||||
"typescript": "^4.9.4",
|
||||
"webpack": "^5.75.0",
|
||||
"webpack-bundle-analyzer": "^4.5.0"
|
||||
},
|
||||
"bin": {
|
||||
"scrypted-changelog": "bin/scrypted-changelog.js",
|
||||
"scrypted-debug": "bin/scrypted-debug.js",
|
||||
"scrypted-deploy": "bin/scrypted-deploy.js",
|
||||
"scrypted-deploy-debug": "bin/scrypted-deploy-debug.js",
|
||||
"scrypted-package-json": "bin/scrypted-package-json.js",
|
||||
"scrypted-setup-project": "bin/scrypted-setup-project.js",
|
||||
"scrypted-webpack": "bin/scrypted-webpack.js"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^18.11.18",
|
||||
"@types/stringify-object": "^4.0.0",
|
||||
"stringify-object": "^3.3.0",
|
||||
"ts-node": "^10.4.0",
|
||||
"typedoc": "^0.23.21"
|
||||
}
|
||||
},
|
||||
"../sdk": {
|
||||
"extraneous": true
|
||||
},
|
||||
"node_modules/@scrypted/sdk": {
|
||||
"resolved": "../../sdk",
|
||||
"link": true
|
||||
},
|
||||
"node_modules/@types/node": {
|
||||
"version": "16.6.1",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-16.6.1.tgz",
|
||||
"integrity": "sha512-Sr7BhXEAer9xyGuCN3Ek9eg9xPviCF2gfu9kTfuU2HkTVAMYSDeX40fvpmo72n5nansg3nsBjuQBrsS28r+NUw=="
|
||||
},
|
||||
"node_modules/node-addon-api": {
|
||||
"version": "4.2.0",
|
||||
"resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-4.2.0.tgz",
|
||||
"integrity": "sha512-eazsqzwG2lskuzBqCGPi7Ac2UgOoMz8JVOXVhTvvPDYhthvNpefx8jWD8Np7Gv+2Sz0FlPWZk0nJV0z598Wn8Q==",
|
||||
"optional": true
|
||||
},
|
||||
"node_modules/node-gyp-build": {
|
||||
"version": "4.3.0",
|
||||
"resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.3.0.tgz",
|
||||
"integrity": "sha512-iWjXZvmboq0ja1pUGULQBexmxq8CV4xBhX7VDOTbL7ZR4FOowwY/VOtRxBN/yKxmdGoIp4j5ysNT4u3S2pDQ3Q==",
|
||||
"optional": true,
|
||||
"bin": {
|
||||
"node-gyp-build": "bin.js",
|
||||
"node-gyp-build-optional": "optional.js",
|
||||
"node-gyp-build-test": "build-test.js"
|
||||
}
|
||||
},
|
||||
"node_modules/pam-diff": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/pam-diff/-/pam-diff-1.1.0.tgz",
|
||||
"integrity": "sha512-4Xo6u4amQzhMcff372t7UfZBqmXd06av/GDVD6dQWyND7a4nW42ScJf5yr2WYf6JHTdPdVG82cDquuJkGI1FYA==",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"pixel-change": "1.1.0",
|
||||
"polygon-points": "^0.6.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=10"
|
||||
}
|
||||
},
|
||||
"node_modules/pipe2pam": {
|
||||
"version": "0.6.2",
|
||||
"resolved": "https://registry.npmjs.org/pipe2pam/-/pipe2pam-0.6.2.tgz",
|
||||
"integrity": "sha512-gUWldPYgNjCp1q8qKpTsSalDqXWaLlaXVO+la1jgiJMbMlokMdOhzNyVAsRKJR23FVyPOAUHdi2YpDfneSOcbw=="
|
||||
},
|
||||
"node_modules/pixel-change": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/pixel-change/-/pixel-change-1.1.0.tgz",
|
||||
"integrity": "sha512-p0J+CXVpeULyzlQTFzRnNcvQnbSn5kOw6qlMWPE09JNybicy/rr6ZC3AS6Z2gKhHINmo62KzynxQNlRIk6YJNQ==",
|
||||
"hasInstallScript": true,
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"node-addon-api": "^4.2.0",
|
||||
"node-gyp-build": "^4.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=10"
|
||||
}
|
||||
},
|
||||
"node_modules/polygon-points": {
|
||||
"version": "0.6.0",
|
||||
"resolved": "https://registry.npmjs.org/polygon-points/-/polygon-points-0.6.0.tgz",
|
||||
"integrity": "sha512-GiWcByVNyfbhGbBmiCfUXzXeDy+iMeYFUZ2Cc+ORWRpECcXi+AwyUH82ZT5zRDGIC8iU6jAcs0fFQGp03wbAFA==",
|
||||
"optional": true
|
||||
}
|
||||
},
|
||||
"dependencies": {
|
||||
"@scrypted/sdk": {
|
||||
"version": "file:../../sdk",
|
||||
"requires": {
|
||||
"@babel/preset-typescript": "^7.18.6",
|
||||
"@types/node": "^18.11.18",
|
||||
"@types/stringify-object": "^4.0.0",
|
||||
"adm-zip": "^0.4.13",
|
||||
"axios": "^0.21.4",
|
||||
"babel-loader": "^9.1.0",
|
||||
"babel-plugin-const-enum": "^1.1.0",
|
||||
"esbuild": "^0.15.9",
|
||||
"ncp": "^2.0.0",
|
||||
"raw-loader": "^4.0.2",
|
||||
"rimraf": "^3.0.2",
|
||||
"stringify-object": "^3.3.0",
|
||||
"tmp": "^0.2.1",
|
||||
"ts-loader": "^9.4.2",
|
||||
"ts-node": "^10.4.0",
|
||||
"typedoc": "^0.23.21",
|
||||
"typescript": "^4.9.4",
|
||||
"webpack": "^5.75.0",
|
||||
"webpack-bundle-analyzer": "^4.5.0"
|
||||
}
|
||||
},
|
||||
"@types/node": {
|
||||
"version": "16.6.1",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-16.6.1.tgz",
|
||||
"integrity": "sha512-Sr7BhXEAer9xyGuCN3Ek9eg9xPviCF2gfu9kTfuU2HkTVAMYSDeX40fvpmo72n5nansg3nsBjuQBrsS28r+NUw=="
|
||||
},
|
||||
"node-addon-api": {
|
||||
"version": "4.2.0",
|
||||
"resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-4.2.0.tgz",
|
||||
"integrity": "sha512-eazsqzwG2lskuzBqCGPi7Ac2UgOoMz8JVOXVhTvvPDYhthvNpefx8jWD8Np7Gv+2Sz0FlPWZk0nJV0z598Wn8Q==",
|
||||
"optional": true
|
||||
},
|
||||
"node-gyp-build": {
|
||||
"version": "4.3.0",
|
||||
"resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.3.0.tgz",
|
||||
"integrity": "sha512-iWjXZvmboq0ja1pUGULQBexmxq8CV4xBhX7VDOTbL7ZR4FOowwY/VOtRxBN/yKxmdGoIp4j5ysNT4u3S2pDQ3Q==",
|
||||
"optional": true
|
||||
},
|
||||
"pam-diff": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/pam-diff/-/pam-diff-1.1.0.tgz",
|
||||
"integrity": "sha512-4Xo6u4amQzhMcff372t7UfZBqmXd06av/GDVD6dQWyND7a4nW42ScJf5yr2WYf6JHTdPdVG82cDquuJkGI1FYA==",
|
||||
"optional": true,
|
||||
"requires": {
|
||||
"pixel-change": "1.1.0",
|
||||
"polygon-points": "^0.6.0"
|
||||
}
|
||||
},
|
||||
"pipe2pam": {
|
||||
"version": "0.6.2",
|
||||
"resolved": "https://registry.npmjs.org/pipe2pam/-/pipe2pam-0.6.2.tgz",
|
||||
"integrity": "sha512-gUWldPYgNjCp1q8qKpTsSalDqXWaLlaXVO+la1jgiJMbMlokMdOhzNyVAsRKJR23FVyPOAUHdi2YpDfneSOcbw=="
|
||||
},
|
||||
"pixel-change": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/pixel-change/-/pixel-change-1.1.0.tgz",
|
||||
"integrity": "sha512-p0J+CXVpeULyzlQTFzRnNcvQnbSn5kOw6qlMWPE09JNybicy/rr6ZC3AS6Z2gKhHINmo62KzynxQNlRIk6YJNQ==",
|
||||
"optional": true,
|
||||
"requires": {
|
||||
"node-addon-api": "^4.2.0",
|
||||
"node-gyp-build": "^4.3.0"
|
||||
}
|
||||
},
|
||||
"polygon-points": {
|
||||
"version": "0.6.0",
|
||||
"resolved": "https://registry.npmjs.org/polygon-points/-/polygon-points-0.6.0.tgz",
|
||||
"integrity": "sha512-GiWcByVNyfbhGbBmiCfUXzXeDy+iMeYFUZ2Cc+ORWRpECcXi+AwyUH82ZT5zRDGIC8iU6jAcs0fFQGp03wbAFA==",
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
{
|
||||
"name": "@scrypted/pam-diff",
|
||||
"keywords": [
|
||||
"scrypted",
|
||||
"plugin",
|
||||
"motion",
|
||||
"detect",
|
||||
"detection",
|
||||
"pamdiff",
|
||||
"pam",
|
||||
"diff"
|
||||
],
|
||||
"scripts": {
|
||||
"scrypted-setup-project": "scrypted-setup-project",
|
||||
"prescrypted-setup-project": "scrypted-package-json",
|
||||
"build": "scrypted-webpack",
|
||||
"prepublishOnly": "NODE_ENV=production scrypted-webpack",
|
||||
"prescrypted-vscode-launch": "scrypted-webpack",
|
||||
"scrypted-vscode-launch": "scrypted-deploy-debug",
|
||||
"scrypted-deploy-debug": "scrypted-deploy-debug",
|
||||
"scrypted-debug": "scrypted-debug",
|
||||
"scrypted-deploy": "scrypted-deploy",
|
||||
"scrypted-readme": "scrypted-readme",
|
||||
"scrypted-package-json": "scrypted-package-json"
|
||||
},
|
||||
"scrypted": {
|
||||
"name": "PAM Diff Motion Detection",
|
||||
"type": "API",
|
||||
"interfaces": [
|
||||
"ObjectDetection",
|
||||
"ObjectDetectionGenerator"
|
||||
],
|
||||
"pluginDependencies": [
|
||||
"@scrypted/objectdetector"
|
||||
]
|
||||
},
|
||||
"dependencies": {
|
||||
"@types/node": "^16.6.1",
|
||||
"pipe2pam": "^0.6.2"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"pam-diff": "^1.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@scrypted/sdk": "file:../../sdk"
|
||||
},
|
||||
"version": "0.0.24"
|
||||
}
|
||||
@@ -1,140 +0,0 @@
|
||||
import sdk, { MediaObject, ObjectDetection, ObjectDetectionGeneratorResult, ObjectDetectionGeneratorSession, ObjectDetectionModel, ObjectDetectionResult, ObjectDetectionSession, ObjectsDetected, ScryptedDeviceBase, VideoFrame } from '@scrypted/sdk';
|
||||
|
||||
import PD from 'pam-diff';
|
||||
import P2P from 'pipe2pam';
|
||||
import { PassThrough, Writable } from 'stream';
|
||||
|
||||
const defaultDifference = 9;
|
||||
const defaultPercentage = 2;
|
||||
|
||||
|
||||
class PamDiff extends ScryptedDeviceBase implements ObjectDetection {
|
||||
|
||||
|
||||
async * generateObjectDetectionsInternal(videoFrames: AsyncGenerator<VideoFrame, any, unknown>, session: ObjectDetectionGeneratorSession): AsyncGenerator<ObjectDetectionGeneratorResult, any, unknown> {
|
||||
videoFrames = await sdk.connectRPCObject(videoFrames);
|
||||
|
||||
const width = 640;
|
||||
const height = 360;
|
||||
const p2p: Writable = new P2P();
|
||||
const pt = new PassThrough();
|
||||
const pamDiff = new PD({
|
||||
difference: parseInt(session.settings?.difference) || defaultDifference,
|
||||
percent: parseInt(session.settings?.percent) || defaultPercentage,
|
||||
response: session?.settings?.motionAsObjects ? 'blobs' : 'percent',
|
||||
});
|
||||
pt.pipe(p2p).pipe(pamDiff);
|
||||
|
||||
const queued: ObjectsDetected[] = [];
|
||||
pamDiff.on('diff', async (data: any) => {
|
||||
const trigger = data.trigger[0];
|
||||
// console.log(trigger.blobs.length);
|
||||
const { blobs } = trigger;
|
||||
|
||||
const detections: ObjectDetectionResult[] = [];
|
||||
if (blobs?.length) {
|
||||
for (const blob of blobs) {
|
||||
detections.push(
|
||||
{
|
||||
className: 'motion',
|
||||
score: 1,
|
||||
boundingBox: [blob.minX, blob.minY, blob.maxX - blob.minX, blob.maxY - blob.minY],
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
else {
|
||||
detections.push(
|
||||
{
|
||||
className: 'motion',
|
||||
score: trigger.percent / 100,
|
||||
}
|
||||
)
|
||||
}
|
||||
const event: ObjectsDetected = {
|
||||
timestamp: Date.now(),
|
||||
inputDimensions: [width, height],
|
||||
detections,
|
||||
}
|
||||
queued.push(event);
|
||||
});
|
||||
|
||||
|
||||
for await (const videoFrame of videoFrames) {
|
||||
const header = `P7
|
||||
WIDTH ${width}
|
||||
HEIGHT ${height}
|
||||
DEPTH 3
|
||||
MAXVAL 255
|
||||
TUPLTYPE RGB
|
||||
ENDHDR
|
||||
`;
|
||||
|
||||
const { image } = videoFrame;
|
||||
|
||||
const buffer = await image.toBuffer({
|
||||
resize: (image.width !== width || image.height !== height) ? {
|
||||
width,
|
||||
height,
|
||||
} : undefined,
|
||||
format: 'rgb',
|
||||
});
|
||||
pt.write(Buffer.from(header));
|
||||
pt.write(buffer);
|
||||
|
||||
if (!queued.length) {
|
||||
yield {
|
||||
__json_copy_serialize_children: true,
|
||||
videoFrame,
|
||||
detected: {
|
||||
timestamp: Date.now(),
|
||||
detections: [],
|
||||
}
|
||||
}
|
||||
}
|
||||
while (queued.length) {
|
||||
yield {
|
||||
__json_copy_serialize_children: true,
|
||||
detected: queued.pop(),
|
||||
videoFrame,
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
async generateObjectDetections(videoFrames: AsyncGenerator<VideoFrame, any, unknown>, session: ObjectDetectionGeneratorSession): Promise<AsyncGenerator<ObjectDetectionGeneratorResult, any, unknown>> {
|
||||
return this.generateObjectDetectionsInternal(videoFrames, session);
|
||||
}
|
||||
|
||||
async detectObjects(mediaObject: MediaObject, session?: ObjectDetectionSession): Promise<ObjectsDetected> {
|
||||
throw new Error('cannot run motion detection on an image')
|
||||
}
|
||||
|
||||
async getDetectionModel(): Promise<ObjectDetectionModel> {
|
||||
return {
|
||||
name: '@scrypted/pam-diff',
|
||||
classes: ['motion'],
|
||||
inputFormat: 'rgb',
|
||||
inputSize: [640, 360],
|
||||
settings: [
|
||||
{
|
||||
title: 'Motion Difference',
|
||||
description: 'The color difference required to trigger motion on a pixel.',
|
||||
key: 'difference',
|
||||
value: this.storage.getItem('difference') || defaultDifference,
|
||||
type: 'number',
|
||||
},
|
||||
{
|
||||
title: 'Motion Percent',
|
||||
description: 'The percentage of pixels required to trigger motion',
|
||||
key: 'percent',
|
||||
value: this.storage.getItem('percent') || defaultPercentage,
|
||||
type: 'number',
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export default PamDiff;
|
||||
4
plugins/prebuffer-mixin/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@scrypted/prebuffer-mixin",
|
||||
"version": "0.10.17",
|
||||
"version": "0.10.18",
|
||||
"lockfileVersion": 2,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@scrypted/prebuffer-mixin",
|
||||
"version": "0.10.17",
|
||||
"version": "0.10.18",
|
||||
"license": "Apache-2.0",
|
||||
"dependencies": {
|
||||
"@scrypted/common": "file:../../common",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@scrypted/prebuffer-mixin",
|
||||
"version": "0.10.17",
|
||||
"version": "0.10.18",
|
||||
"description": "Video Stream Rebroadcast, Prebuffer, and Management Plugin for Scrypted.",
|
||||
"author": "Scrypted",
|
||||
"license": "Apache-2.0",
|
||||
|
||||
@@ -1596,10 +1596,11 @@ export class RebroadcastPlugin extends AutoenableMixinProvider implements MixinP
|
||||
});
|
||||
|
||||
// schedule restarts at 2am
|
||||
const midnight = millisUntilMidnight();
|
||||
const twoAM = midnight + 2 * 60 * 60 * 1000;
|
||||
this.log.i(`Rebroadcaster scheduled for restart at 2AM: ${Math.round(twoAM / 1000 / 60)} minutes`)
|
||||
setTimeout(() => deviceManager.requestRestart(), twoAM);
|
||||
// removed as the mp4 containerization leak used way back when is defunct.
|
||||
// const midnight = millisUntilMidnight();
|
||||
// const twoAM = midnight + 2 * 60 * 60 * 1000;
|
||||
// this.log.i(`Rebroadcaster scheduled for restart at 2AM: ${Math.round(twoAM / 1000 / 60)} minutes`)
|
||||
// setTimeout(() => deviceManager.requestRestart(), twoAM);
|
||||
|
||||
process.nextTick(() => {
|
||||
deviceManager.onDeviceDiscovered({
|
||||
|
||||
@@ -5,5 +5,5 @@ av>=10.0.0
|
||||
|
||||
# in case pyvips fails to load, use a pillow fallback.
|
||||
# pillow for anything not intel linux, pillow-simd is available on x64 linux
|
||||
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
|
||||
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
|
||||
Pillow>=5.4.1; 'linux' not in sys_platform or platform_machine != 'x86_64'
|
||||
pillow-simd; 'linux' in sys_platform and platform_machine == 'x86_64'
|
||||
|
||||
@@ -9,4 +9,7 @@ dist/*.js
|
||||
dist/*.txt
|
||||
__pycache__
|
||||
all_models
|
||||
sort_oh
|
||||
download_models.sh
|
||||
tsconfig.json
|
||||
.venv
|
||||
@@ -1,7 +1,7 @@
|
||||
|
||||
{
|
||||
// docker installation
|
||||
// "scrypted.debugHost": "koushik-thin",
|
||||
// "scrypted.debugHost": "koushik-ubuntu",
|
||||
// "scrypted.serverRoot": "/server",
|
||||
|
||||
// pi local installation
|
||||
@@ -11,9 +11,9 @@
|
||||
// local checkout
|
||||
"scrypted.debugHost": "127.0.0.1",
|
||||
"scrypted.serverRoot": "/Users/koush/.scrypted",
|
||||
// "scrypted.debugHost": "koushik-windows",
|
||||
// "scrypted.debugHost": "koushik-winvm",
|
||||
// "scrypted.serverRoot": "C:\\Users\\koush\\.scrypted",
|
||||
|
||||
|
||||
"scrypted.pythonRemoteRoot": "${config:scrypted.serverRoot}/volume/plugin.zip",
|
||||
"python.analysis.extraPaths": [
|
||||
"./node_modules/@scrypted/sdk/types/scrypted_python"
|
||||
5
plugins/rknn/README.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Rockchip NPU Object Detection for Scrypted
|
||||
|
||||
This plugin adds object detection capabilities to any camera in Scrypted using the NPU accelerator on ARM64 Rockchip CPUs. Functionality has been tested on RK3588S, but should also work on RK3562, RK3576, and RK3588.
|
||||
|
||||
Using this plugin in Docker requires Docker to be run with the `--security-opt systempaths=unconfined` flag due to a dependency on the `/proc/device-tree/compatible` file. Additionally, use the Docker flag `--device /dev/dri:/dev/dri` to ensure that the `/dev/dri/renderD129` device is accessible. When using this plugin in a local install, ensure you have installed Rockchip's `librknnrt.so` as `/usr/lib/librknnrt.so`.
|
||||
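For reference, a container launch that satisfies both requirements might look like the sketch below. Only the two flags named above come from this README; the image name, volume mapping, and host networking are illustrative assumptions.

```sh
# Sketch only: the image tag, volume path, and networking mode are assumptions.
# The two flags below are the ones this README requires for the RKNN plugin.
docker run -d \
  --security-opt systempaths=unconfined \
  --device /dev/dri:/dev/dri \
  -v ~/.scrypted/volume:/server/volume \
  --network host \
  koush/scrypted
```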
@@ -1,47 +1,48 @@
|
||||
{
|
||||
"name": "@scrypted/tensorflow-lite",
|
||||
"version": "0.0.18",
|
||||
"name": "@scrypted/rknn",
|
||||
"version": "0.0.4",
|
||||
"lockfileVersion": 2,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@scrypted/tensorflow-lite",
|
||||
"version": "0.0.18",
|
||||
"name": "@scrypted/rknn",
|
||||
"version": "0.0.4",
|
||||
"devDependencies": {
|
||||
"@scrypted/sdk": "file:../../sdk"
|
||||
}
|
||||
},
|
||||
"../../sdk": {
|
||||
"name": "@scrypted/sdk",
|
||||
"version": "0.2.39",
|
||||
"version": "0.3.29",
|
||||
"dev": true,
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@babel/preset-typescript": "^7.16.7",
|
||||
"@babel/preset-typescript": "^7.18.6",
|
||||
"adm-zip": "^0.4.13",
|
||||
"axios": "^0.21.4",
|
||||
"babel-loader": "^8.2.3",
|
||||
"axios": "^1.6.5",
|
||||
"babel-loader": "^9.1.0",
|
||||
"babel-plugin-const-enum": "^1.1.0",
|
||||
"esbuild": "^0.15.9",
|
||||
"ncp": "^2.0.0",
|
||||
"raw-loader": "^4.0.2",
|
||||
"rimraf": "^3.0.2",
|
||||
"tmp": "^0.2.1",
|
||||
"typescript": "^4.9.3",
|
||||
"webpack": "^5.74.0",
|
||||
"ts-loader": "^9.4.2",
|
||||
"typescript": "^4.9.4",
|
||||
"webpack": "^5.75.0",
|
||||
"webpack-bundle-analyzer": "^4.5.0"
|
||||
},
|
||||
"bin": {
|
||||
"scrypted-changelog": "bin/scrypted-changelog.js",
|
||||
"scrypted-debug": "bin/scrypted-debug.js",
|
||||
"scrypted-deploy": "bin/scrypted-deploy.js",
|
||||
"scrypted-deploy-debug": "bin/scrypted-deploy-debug.js",
|
||||
"scrypted-package-json": "bin/scrypted-package-json.js",
|
||||
"scrypted-readme": "bin/scrypted-readme.js",
|
||||
"scrypted-setup-project": "bin/scrypted-setup-project.js",
|
||||
"scrypted-webpack": "bin/scrypted-webpack.js"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^18.11.9",
|
||||
"@types/node": "^18.11.18",
|
||||
"@types/stringify-object": "^4.0.0",
|
||||
"stringify-object": "^3.3.0",
|
||||
"ts-node": "^10.4.0",
|
||||
@@ -60,12 +61,12 @@
|
||||
"@scrypted/sdk": {
|
||||
"version": "file:../../sdk",
|
||||
"requires": {
|
||||
"@babel/preset-typescript": "^7.16.7",
|
||||
"@types/node": "^18.11.9",
|
||||
"@babel/preset-typescript": "^7.18.6",
|
||||
"@types/node": "^18.11.18",
|
||||
"@types/stringify-object": "^4.0.0",
|
||||
"adm-zip": "^0.4.13",
|
||||
"axios": "^0.21.4",
|
||||
"babel-loader": "^8.2.3",
|
||||
"axios": "^1.6.5",
|
||||
"babel-loader": "^9.1.0",
|
||||
"babel-plugin-const-enum": "^1.1.0",
|
||||
"esbuild": "^0.15.9",
|
||||
"ncp": "^2.0.0",
|
||||
@@ -73,10 +74,11 @@
|
||||
"rimraf": "^3.0.2",
|
||||
"stringify-object": "^3.3.0",
|
||||
"tmp": "^0.2.1",
|
||||
"ts-loader": "^9.4.2",
|
||||
"ts-node": "^10.4.0",
|
||||
"typedoc": "^0.23.21",
|
||||
"typescript": "^4.9.3",
|
||||
"webpack": "^5.74.0",
|
||||
"typescript": "^4.9.4",
|
||||
"webpack": "^5.75.0",
|
||||
"webpack-bundle-analyzer": "^4.5.0"
|
||||
}
|
||||
}
|
||||
@@ -1,14 +1,16 @@
|
||||
{
|
||||
"name": "@scrypted/dlib",
|
||||
"description": "Scrypted Face Recognition",
|
||||
"name": "@scrypted/rknn",
|
||||
"description": "Scrypted Rockchip NPU Object Detection",
|
||||
"keywords": [
|
||||
"scrypted",
|
||||
"plugin",
|
||||
"dlib",
|
||||
"face",
|
||||
"rknn",
|
||||
"rockchip",
|
||||
"npu",
|
||||
"motion",
|
||||
"object",
|
||||
"detect",
|
||||
"detection",
|
||||
"recognition",
|
||||
"people",
|
||||
"person"
|
||||
],
|
||||
@@ -26,21 +28,22 @@
|
||||
"scrypted-package-json": "scrypted-package-json"
|
||||
},
|
||||
"scrypted": {
|
||||
"name": "Dlib Face Recognition",
|
||||
"name": "Rockchip NPU Object Detection",
|
||||
"pluginDependencies": [
|
||||
"@scrypted/objectdetector"
|
||||
],
|
||||
"runtime": "python",
|
||||
"pythonVersion": {
|
||||
"default": "3.10"
|
||||
},
|
||||
"type": "API",
|
||||
"interfaces": [
|
||||
"Camera",
|
||||
"Settings",
|
||||
"BufferConverter",
|
||||
"ObjectDetection"
|
||||
"ObjectDetection",
|
||||
"ObjectDetectionPreview"
|
||||
]
|
||||
},
|
||||
"devDependencies": {
|
||||
"@scrypted/sdk": "file:../../sdk"
|
||||
},
|
||||
"version": "0.0.1"
|
||||
"version": "0.0.4"
|
||||
}
|
||||
1
plugins/rknn/src/common
Symbolic link
@@ -0,0 +1 @@
|
||||
../../openvino/src/common/
|
||||
1
plugins/rknn/src/detect
Symbolic link
@@ -0,0 +1 @@
|
||||
../../openvino/src/detect
|
||||
4
plugins/rknn/src/main.py
Normal file
@@ -0,0 +1,4 @@
|
||||
from rknn import RKNNPlugin
|
||||
|
||||
def create_scrypted_plugin():
|
||||
return RKNNPlugin()
|
||||
1
plugins/rknn/src/predict
Symbolic link
@@ -0,0 +1 @@
|
||||
../../openvino/src/predict
|
||||
2
plugins/rknn/src/requirements.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
https://github.com/airockchip/rknn-toolkit2/raw/v2.0.0-beta0/rknn-toolkit-lite2/packages/rknn_toolkit_lite2-2.0.0b0-cp310-cp310-linux_aarch64.whl
|
||||
pillow==10.3.0
|
||||
1
plugins/rknn/src/rknn/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
from .plugin import RKNNPlugin
|
||||
155
plugins/rknn/src/rknn/optimized/yolo.py
Normal file
@@ -0,0 +1,155 @@
|
||||
# adapted from https://github.com/airockchip/rknn_model_zoo/blob/eaa94d6f57ca553d493bf3bd7399a070452d2774/examples/yolov6/python/yolov6.py
|
||||
|
||||
import numpy as np
|
||||
|
||||
from common.softmax import softmax
|
||||
|
||||
|
||||
OBJ_THRESH = 0.25
|
||||
NMS_THRESH = 0.45
|
||||
|
||||
IMG_SIZE = (640, 640) # (width, height), such as (1280, 736)
|
||||
|
||||
CLASSES = ("person", "bicycle", "car","motorbike ","aeroplane ","bus ","train","truck ","boat","traffic light",
|
||||
"fire hydrant","stop sign ","parking meter","bench","bird","cat","dog ","horse ","sheep","cow","elephant",
|
||||
"bear","zebra ","giraffe","backpack","umbrella","handbag","tie","suitcase","frisbee","skis","snowboard","sports ball","kite",
|
||||
"baseball bat","baseball glove","skateboard","surfboard","tennis racket","bottle","wine glass","cup","fork","knife ",
|
||||
"spoon","bowl","banana","apple","sandwich","orange","broccoli","carrot","hot dog","pizza ","donut","cake","chair","sofa",
|
||||
"pottedplant","bed","diningtable","toilet ","tvmonitor","laptop ","mouse ","remote ","keyboard ","cell phone","microwave ",
|
||||
"oven ","toaster","sink","refrigerator ","book","clock","vase","scissors ","teddy bear ","hair drier", "toothbrush ")
|
||||
|
||||
coco_id_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
|
||||
35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
|
||||
64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
|
||||
|
||||
|
||||
def filter_boxes(boxes, box_confidences, box_class_probs):
|
||||
"""Filter boxes with object threshold.
|
||||
"""
|
||||
box_confidences = box_confidences.reshape(-1)
|
||||
candidate, class_num = box_class_probs.shape
|
||||
|
||||
class_max_score = np.max(box_class_probs, axis=-1)
|
||||
classes = np.argmax(box_class_probs, axis=-1)
|
||||
|
||||
_class_pos = np.where(class_max_score* box_confidences >= OBJ_THRESH)
|
||||
scores = (class_max_score* box_confidences)[_class_pos]
|
||||
|
||||
boxes = boxes[_class_pos]
|
||||
classes = classes[_class_pos]
|
||||
|
||||
return boxes, classes, scores
|
||||
|
||||
def nms_boxes(boxes, scores):
|
||||
"""Suppress non-maximal boxes.
|
||||
# Returns
|
||||
keep: ndarray, index of effective boxes.
|
||||
"""
|
||||
x = boxes[:, 0]
|
||||
y = boxes[:, 1]
|
||||
w = boxes[:, 2] - boxes[:, 0]
|
||||
h = boxes[:, 3] - boxes[:, 1]
|
||||
|
||||
areas = w * h
|
||||
order = scores.argsort()[::-1]
|
||||
|
||||
keep = []
|
||||
while order.size > 0:
|
||||
i = order[0]
|
||||
keep.append(i)
|
||||
|
||||
xx1 = np.maximum(x[i], x[order[1:]])
|
||||
yy1 = np.maximum(y[i], y[order[1:]])
|
||||
xx2 = np.minimum(x[i] + w[i], x[order[1:]] + w[order[1:]])
|
||||
yy2 = np.minimum(y[i] + h[i], y[order[1:]] + h[order[1:]])
|
||||
|
||||
w1 = np.maximum(0.0, xx2 - xx1 + 0.00001)
|
||||
h1 = np.maximum(0.0, yy2 - yy1 + 0.00001)
|
||||
inter = w1 * h1
|
||||
|
||||
ovr = inter / (areas[i] + areas[order[1:]] - inter)
|
||||
inds = np.where(ovr <= NMS_THRESH)[0]
|
||||
order = order[inds + 1]
|
||||
keep = np.array(keep)
|
||||
return keep
|
||||
|
||||
def dfl(position):
|
||||
# Distribution Focal Loss (DFL)
|
||||
x = np.array(position)
|
||||
n,c,h,w = x.shape
|
||||
p_num = 4
|
||||
mc = c//p_num
|
||||
y = x.reshape(n,p_num,mc,h,w)
|
||||
y = softmax(y, axis=2)
|
||||
acc_metrix = np.arange(mc).reshape(1,1,mc,1,1)
|
||||
y = (y*acc_metrix).sum(2)
|
||||
return y
|
||||
|
||||
def box_process(position):
|
||||
grid_h, grid_w = position.shape[2:4]
|
||||
col, row = np.meshgrid(np.arange(0, grid_w), np.arange(0, grid_h))
|
||||
col = col.reshape(1, 1, grid_h, grid_w)
|
||||
row = row.reshape(1, 1, grid_h, grid_w)
|
||||
grid = np.concatenate((col, row), axis=1)
|
||||
stride = np.array([IMG_SIZE[1]//grid_h, IMG_SIZE[0]//grid_w]).reshape(1,2,1,1)
|
||||
|
||||
if position.shape[1]==4:
|
||||
box_xy = grid +0.5 -position[:,0:2,:,:]
|
||||
box_xy2 = grid +0.5 +position[:,2:4,:,:]
|
||||
xyxy = np.concatenate((box_xy*stride, box_xy2*stride), axis=1)
|
||||
else:
|
||||
position = dfl(position)
|
||||
box_xy = grid +0.5 -position[:,0:2,:,:]
|
||||
box_xy2 = grid +0.5 +position[:,2:4,:,:]
|
||||
xyxy = np.concatenate((box_xy*stride, box_xy2*stride), axis=1)
|
||||
|
||||
return xyxy
|
||||
|
||||
def post_process(input_data):
|
||||
boxes, scores, classes_conf = [], [], []
|
||||
default_branch = 3
pair_per_branch = len(input_data) // default_branch
# Python ignores the score_sum output
for i in range(default_branch):
|
||||
boxes.append(box_process(input_data[pair_per_branch*i]))
|
||||
classes_conf.append(input_data[pair_per_branch*i+1])
|
||||
scores.append(np.ones_like(input_data[pair_per_branch*i+1][:,:1,:,:], dtype=np.float32))
|
||||
|
||||
def sp_flatten(_in):
|
||||
ch = _in.shape[1]
|
||||
_in = _in.transpose(0,2,3,1)
|
||||
return _in.reshape(-1, ch)
|
||||
|
||||
boxes = [sp_flatten(_v) for _v in boxes]
|
||||
classes_conf = [sp_flatten(_v) for _v in classes_conf]
|
||||
scores = [sp_flatten(_v) for _v in scores]
|
||||
|
||||
boxes = np.concatenate(boxes)
|
||||
classes_conf = np.concatenate(classes_conf)
|
||||
scores = np.concatenate(scores)
|
||||
|
||||
# filter according to threshold
|
||||
boxes, classes, scores = filter_boxes(boxes, scores, classes_conf)
|
||||
|
||||
# nms
|
||||
nboxes, nclasses, nscores = [], [], []
|
||||
for c in set(classes):
|
||||
inds = np.where(classes == c)
|
||||
b = boxes[inds]
|
||||
c = classes[inds]
|
||||
s = scores[inds]
|
||||
keep = nms_boxes(b, s)
|
||||
|
||||
if len(keep) != 0:
|
||||
nboxes.append(b[keep])
|
||||
nclasses.append(c[keep])
|
||||
nscores.append(s[keep])
|
||||
|
||||
if not nclasses and not nscores:
|
||||
return [], [], []
|
||||
|
||||
boxes = np.concatenate(nboxes)
|
||||
classes = np.concatenate(nclasses)
|
||||
scores = np.concatenate(nscores)
|
||||
|
||||
return boxes, classes, scores
|
||||
134
plugins/rknn/src/rknn/plugin.py
Normal file
@@ -0,0 +1,134 @@
|
||||
import asyncio
|
||||
import concurrent.futures
|
||||
import os
|
||||
import platform
|
||||
import queue
|
||||
import threading
|
||||
from typing import Any, Coroutine, List, Tuple
|
||||
import urllib.request
|
||||
|
||||
import numpy as np
|
||||
from PIL.Image import Image
|
||||
from rknnlite.api import RKNNLite
|
||||
|
||||
from predict import PredictPlugin, Prediction
|
||||
from predict.rectangle import Rectangle
|
||||
|
||||
# for Rockchip-optimized models, the postprocessing is slightly different from the original models
|
||||
from .optimized.yolo import post_process, IMG_SIZE, CLASSES
|
||||
|
||||
|
||||
rknn_verbose = False
|
||||
lib_download = 'https://github.com/airockchip/rknn-toolkit2/raw/v2.0.0-beta0/rknpu2/runtime/Linux/librknn_api/aarch64/librknnrt.so'
|
||||
model_download_tmpl = 'https://github.com/bjia56/scrypted-rknn/raw/main/models/{}_{}_optimized.rknn'
|
||||
lib_path = '/usr/lib/librknnrt.so'
|
||||
|
||||
|
||||
def ensure_compatibility_and_get_cpu():
|
||||
err_msg = 'RKNN plugin is only supported on Linux/ARM64 platform with a Rockchip CPU'
|
||||
if platform.machine() != 'aarch64':
|
||||
raise RuntimeError(err_msg)
|
||||
|
||||
if platform.system() != 'Linux':
|
||||
raise RuntimeError(err_msg)
|
||||
|
||||
try:
|
||||
with open('/proc/device-tree/compatible') as f:
|
||||
device_compatible_str = f.read()
|
||||
if 'rk3562' in device_compatible_str:
|
||||
return 'rk3562'
|
||||
elif 'rk3566' in device_compatible_str:
|
||||
return 'rk3566'
|
||||
elif 'rk3568' in device_compatible_str:
|
||||
return 'rk3568'
|
||||
elif 'rk3576' in device_compatible_str:
|
||||
return 'rk3576'
|
||||
elif 'rk3588' in device_compatible_str:
|
||||
return 'rk3588'
|
||||
else:
|
||||
raise RuntimeError(err_msg)
|
||||
except IOError as e:
|
||||
print('Failed to read /proc/device-tree/compatible: {}'.format(e))
|
||||
print('If you are running this via Docker, ensure you are launching the container with --privileged option')
|
||||
raise
|
||||
|
||||
|
||||
class RKNNPlugin(PredictPlugin):
|
||||
labels = {i: CLASSES[i] for i in range(len(CLASSES))}
|
||||
rknn_runtimes: dict
|
||||
|
||||
def __init__(self, nativeId=None):
|
||||
super().__init__(nativeId)
|
||||
cpu = ensure_compatibility_and_get_cpu()
|
||||
model = 'yolov6n'
|
||||
|
||||
self.rknn_runtimes = {}
|
||||
|
||||
if not os.path.exists(lib_path):
|
||||
installation = os.environ.get('SCRYPTED_INSTALL_ENVIRONMENT')
|
||||
if installation in ('docker', 'lxc'):
|
||||
print('Downloading librknnrt.so from {}'.format(lib_download))
|
||||
urllib.request.urlretrieve(lib_download, lib_path)
|
||||
else:
|
||||
raise RuntimeError('librknnrt.so not found. Please download it from {} and place it at {}'.format(lib_download, lib_path))
|
||||
|
||||
model_download = model_download_tmpl.format(model, cpu)
|
||||
model_file = os.path.basename(model_download)
|
||||
model_path = self.downloadFile(model_download, model_file)
|
||||
print('Using model {}'.format(model_path))
|
||||
|
||||
test_rknn = RKNNLite(verbose=rknn_verbose)
|
||||
ret = test_rknn.load_rknn(model_path)
|
||||
if ret != 0:
|
||||
raise RuntimeError('Failed to load model: {}'.format(ret))
|
||||
|
||||
ret = test_rknn.init_runtime()
|
||||
if ret != 0:
|
||||
raise RuntimeError('Failed to init runtime: {}'.format(ret))
|
||||
test_rknn.release()
|
||||
|
||||
def executor_initializer():
|
||||
thread_name = threading.current_thread().name
|
||||
rknn = RKNNLite(verbose=rknn_verbose)
|
||||
ret = rknn.load_rknn(model_path)
|
||||
if ret != 0:
|
||||
raise RuntimeError('Failed to load model: {}'.format(ret))
|
||||
|
||||
ret = rknn.init_runtime()
|
||||
if ret != 0:
|
||||
raise RuntimeError('Failed to init runtime: {}'.format(ret))
|
||||
|
||||
self.rknn_runtimes[thread_name] = rknn
|
||||
print('RKNNLite runtime initialized on thread {}'.format(thread_name))
|
||||
|
||||
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=3, initializer=executor_initializer)
|
||||
|
||||
def get_input_details(self) -> Tuple[int, int, int]:
|
||||
return (IMG_SIZE[0], IMG_SIZE[1], 3)
|
||||
|
||||
def get_input_size(self) -> Tuple[int, int]:
|
||||
return IMG_SIZE
|
||||
|
||||
async def detect_once(self, input: Image, settings: Any, src_size, cvss) -> Coroutine[Any, Any, Any]:
|
||||
def inference(input_tensor):
|
||||
rknn = self.rknn_runtimes[threading.current_thread().name]
|
||||
outputs = rknn.inference(inputs=[input_tensor])
|
||||
return outputs
|
||||
|
||||
async def predict(input_tensor):
|
||||
fut = asyncio.wrap_future(self.executor.submit(inference, input_tensor))
|
||||
outputs = await fut
|
||||
boxes, classes, scores = post_process(outputs)
|
||||
|
||||
predictions: List[Prediction] = []
|
||||
for i in range(len(classes)):
|
||||
#print(CLASSES[classes[i]], scores[i])
|
||||
predictions.append(Prediction(
|
||||
classes[i],
|
||||
float(scores[i]),
|
||||
Rectangle(float(boxes[i][0]), float(boxes[i][1]), float(boxes[i][2]), float(boxes[i][3]))
|
||||
))
|
||||
|
||||
return self.create_detection_result(predictions, src_size, cvss)
|
||||
input_tensor = np.expand_dims(np.asarray(input), axis=0)
|
||||
return await predict(input_tensor)
|
||||
4
plugins/tapo/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@scrypted/tapo",
|
||||
"version": "0.0.11",
|
||||
"version": "0.0.13",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@scrypted/tapo",
|
||||
"version": "0.0.11",
|
||||
"version": "0.0.13",
|
||||
"dependencies": {
|
||||
"@scrypted/common": "file:../../common",
|
||||
"@scrypted/sdk": "file:../../sdk",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@scrypted/tapo",
|
||||
"version": "0.0.11",
|
||||
"version": "0.0.13",
|
||||
"description": "Tapo Camera Plugin for Scrypted",
|
||||
"scripts": {
|
||||
"scrypted-setup-project": "scrypted-setup-project",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { SettingsMixinDeviceBase } from '@scrypted/common/src/settings-mixin';
|
||||
import sdk, { DeviceProvider, DeviceState, FFmpegInput, Intercom, MediaObject, MixinProvider, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting, Settings, SettingValue, VideoCamera } from '@scrypted/sdk';
|
||||
import sdk, { DeviceProvider, FFmpegInput, Intercom, MediaObject, MixinProvider, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting, Settings, SettingValue, VideoCamera, WritableDeviceState } from '@scrypted/sdk';
|
||||
import { StorageSettings } from '@scrypted/sdk/storage-settings';
|
||||
import { startRtpForwarderProcess } from '../../webrtc/src/rtp-forwarders';
|
||||
import { MpegTSWriter, StreamTypePCMATapo } from './mpegts-writer';
|
||||
@@ -97,7 +97,7 @@ class TapoIntercom extends ScryptedDeviceBase implements MixinProvider {
|
||||
]
|
||||
}
|
||||
|
||||
async getMixin(mixinDevice: any, mixinDeviceInterfaces: ScryptedInterface[], mixinDeviceState: DeviceState): Promise<any> {
|
||||
async getMixin(mixinDevice: any, mixinDeviceInterfaces: ScryptedInterface[], mixinDeviceState: WritableDeviceState): Promise<any> {
|
||||
return new TapoIntercomMixin({
|
||||
mixinProviderNativeId: this.nativeId,
|
||||
group: 'Tapo Two Way Audio',
|
||||
|
||||
@@ -4,7 +4,6 @@ import { readLine } from '@scrypted/common/src/read-stream';
|
||||
import { parseHeaders, readBody, readMessage, writeMessage } from '@scrypted/common/src/rtsp-server';
|
||||
import crypto from 'crypto';
|
||||
import { Duplex, PassThrough, Writable } from 'stream';
|
||||
import { BufferParser, StreamParser } from '../../../server/src/http-fetch-helpers';
|
||||
import { digestAuthHeader } from './digest-auth';
|
||||
|
||||
export function getTapoAdminPassword(cloudPassword: string, useSHA256: boolean) {
|
||||
@@ -32,17 +31,17 @@ export class TapoAPI {
|
||||
credential: undefined,
|
||||
url: url,
|
||||
ignoreStatusCode: true,
|
||||
}, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'multipart/mixed; boundary=--client-stream-boundary--',
|
||||
},
|
||||
}, BufferParser);
|
||||
responseType: 'buffer',
|
||||
});
|
||||
|
||||
if (response.statusCode !== 401)
|
||||
throw new Error('Expected 401 status code for two way audio init')
|
||||
|
||||
const wwwAuthenticate = response.headers['www-authenticate'];
|
||||
const wwwAuthenticate = response.headers.get('www-authenticate') ?? '';
|
||||
const useSHA256 = wwwAuthenticate.includes('encrypt_type="3"');
|
||||
|
||||
const password = getTapoAdminPassword(options.cloudPassword, useSHA256);
|
||||
@@ -52,16 +51,16 @@ export class TapoAPI {
|
||||
const response2 = await authHttpFetch({
|
||||
credential: undefined,
|
||||
url: url,
|
||||
}, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': auth,
|
||||
'Content-Type': 'multipart/mixed; boundary=--client-stream-boundary--',
|
||||
},
|
||||
}, StreamParser)
|
||||
responseType: 'readable',
|
||||
})
|
||||
|
||||
const tapo = new TapoAPI();
|
||||
tapo.keyExchange = response2.headers['key-exchange'] as string;
|
||||
tapo.keyExchange = response2.headers.get('key-exchange') ?? '';
|
||||
tapo.stream = response2.body.socket;
|
||||
tapo.stream.on('close', () => console.error('stream closed'));
|
||||
// this.stream.on('data', data => console.log('data', data));
|
||||
@@ -129,6 +128,7 @@ export class TapoAPI {
|
||||
'X-If-Encrypt': '0',
|
||||
'X-Session-Id': this.backchannelSessionId,
|
||||
});
|
||||
this.stream.write('\r\n');
|
||||
});
|
||||
|
||||
this.stream.on('close', () => pt.destroy());
|
||||
@@ -150,6 +150,7 @@ export class TapoAPI {
|
||||
writeMessage(this.stream, undefined, Buffer.from(JSON.stringify(request)), {
|
||||
'Content-Type': 'application/json',
|
||||
});
|
||||
this.stream.write('\r\n');
|
||||
|
||||
return deferred.promise;
|
||||
}
|
||||
|
||||
259
plugins/tensorflow-lite/src/predict/craft_utils.py
Normal file
@@ -0,0 +1,259 @@
|
||||
"""
|
||||
Copyright (c) 2019-present NAVER Corp.
|
||||
MIT License
|
||||
"""
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
import numpy as np
|
||||
import cv2
|
||||
import math
|
||||
|
||||
def normalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)):
|
||||
# should be RGB order
|
||||
img = in_img.copy().astype(np.float32)
|
||||
|
||||
img -= np.array([mean[0] * 255.0, mean[1] * 255.0, mean[2] * 255.0], dtype=np.float32)
|
||||
img /= np.array([variance[0] * 255.0, variance[1] * 255.0, variance[2] * 255.0], dtype=np.float32)
|
||||
return img
|
||||
|
||||
""" auxiliary functions """
|
||||
# unwarp coordinates
|
||||
def warpCoord(Minv, pt):
|
||||
out = np.matmul(Minv, (pt[0], pt[1], 1))
|
||||
return np.array([out[0]/out[2], out[1]/out[2]])
|
||||
""" end of auxiliary functions """
|
||||
|
||||
|
||||
def getDetBoxes_core(textmap, linkmap, text_threshold, link_threshold, low_text, estimate_num_chars=False):
|
||||
# prepare data
|
||||
linkmap = linkmap.copy()
|
||||
textmap = textmap.copy()
|
||||
img_h, img_w = textmap.shape
|
||||
|
||||
""" labeling method """
|
||||
ret, text_score = cv2.threshold(textmap, low_text, 1, 0)
|
||||
ret, link_score = cv2.threshold(linkmap, link_threshold, 1, 0)
|
||||
|
||||
text_score_comb = np.clip(text_score + link_score, 0, 1)
|
||||
nLabels, labels, stats, centroids = cv2.connectedComponentsWithStats(text_score_comb.astype(np.uint8), connectivity=4)
|
||||
|
||||
det = []
|
||||
mapper = []
|
||||
for k in range(1,nLabels):
|
||||
# size filtering
|
||||
size = stats[k, cv2.CC_STAT_AREA]
|
||||
if size < 10: continue
|
||||
|
||||
# thresholding
|
||||
if np.max(textmap[labels==k]) < text_threshold: continue
|
||||
|
||||
# make segmentation map
|
||||
segmap = np.zeros(textmap.shape, dtype=np.uint8)
|
||||
segmap[labels==k] = 255
|
||||
if estimate_num_chars:
|
||||
from scipy.ndimage import label
|
||||
_, character_locs = cv2.threshold((textmap - linkmap) * segmap /255., text_threshold, 1, 0)
|
||||
_, n_chars = label(character_locs)
|
||||
mapper.append(n_chars)
|
||||
else:
|
||||
mapper.append(k)
|
||||
segmap[np.logical_and(link_score==1, text_score==0)] = 0 # remove link area
|
||||
x, y = stats[k, cv2.CC_STAT_LEFT], stats[k, cv2.CC_STAT_TOP]
|
||||
w, h = stats[k, cv2.CC_STAT_WIDTH], stats[k, cv2.CC_STAT_HEIGHT]
|
||||
niter = int(math.sqrt(size * min(w, h) / (w * h)) * 2)
|
||||
sx, ex, sy, ey = x - niter, x + w + niter + 1, y - niter, y + h + niter + 1
|
||||
# boundary check
|
||||
if sx < 0 : sx = 0
|
||||
if sy < 0 : sy = 0
|
||||
if ex >= img_w: ex = img_w
|
||||
if ey >= img_h: ey = img_h
|
||||
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(1 + niter, 1 + niter))
|
||||
segmap[sy:ey, sx:ex] = cv2.dilate(segmap[sy:ey, sx:ex], kernel)
|
||||
|
||||
# make box
|
||||
np_contours = np.roll(np.array(np.where(segmap!=0)),1,axis=0).transpose().reshape(-1,2)
|
||||
rectangle = cv2.minAreaRect(np_contours)
|
||||
box = cv2.boxPoints(rectangle)
|
||||
|
||||
# align diamond-shape
|
||||
w, h = np.linalg.norm(box[0] - box[1]), np.linalg.norm(box[1] - box[2])
|
||||
box_ratio = max(w, h) / (min(w, h) + 1e-5)
|
||||
if abs(1 - box_ratio) <= 0.1:
|
||||
l, r = min(np_contours[:,0]), max(np_contours[:,0])
|
||||
t, b = min(np_contours[:,1]), max(np_contours[:,1])
|
||||
box = np.array([[l, t], [r, t], [r, b], [l, b]], dtype=np.float32)
|
||||
|
||||
# make clock-wise order
|
||||
startidx = box.sum(axis=1).argmin()
|
||||
box = np.roll(box, 4-startidx, 0)
|
||||
box = np.array(box)
|
||||
|
||||
det.append(box)
|
||||
|
||||
return det, labels, mapper
|
||||
|
||||
def getPoly_core(boxes, labels, mapper, linkmap):
|
||||
# configs
|
||||
num_cp = 5
|
||||
max_len_ratio = 0.7
|
||||
expand_ratio = 1.45
|
||||
max_r = 2.0
|
||||
step_r = 0.2
|
||||
|
||||
polys = []
|
||||
for k, box in enumerate(boxes):
|
||||
# size filter for small instance
|
||||
w, h = int(np.linalg.norm(box[0] - box[1]) + 1), int(np.linalg.norm(box[1] - box[2]) + 1)
|
||||
if w < 10 or h < 10:
|
||||
polys.append(None); continue
|
||||
|
||||
# warp image
|
||||
tar = np.float32([[0,0],[w,0],[w,h],[0,h]])
|
||||
M = cv2.getPerspectiveTransform(box, tar)
|
||||
word_label = cv2.warpPerspective(labels, M, (w, h), flags=cv2.INTER_NEAREST)
|
||||
try:
|
||||
Minv = np.linalg.inv(M)
|
||||
except:
|
||||
polys.append(None); continue
|
||||
|
||||
# binarization for selected label
|
||||
cur_label = mapper[k]
|
||||
word_label[word_label != cur_label] = 0
|
||||
word_label[word_label > 0] = 1
|
||||
|
||||
""" Polygon generation """
|
||||
# find top/bottom contours
|
||||
cp = []
|
||||
max_len = -1
|
||||
for i in range(w):
|
||||
region = np.where(word_label[:,i] != 0)[0]
|
||||
if len(region) < 2 : continue
|
||||
cp.append((i, region[0], region[-1]))
|
||||
length = region[-1] - region[0] + 1
|
||||
if length > max_len: max_len = length
|
||||
|
||||
# pass if max_len is similar to h
|
||||
if h * max_len_ratio < max_len:
|
||||
polys.append(None); continue
|
||||
|
||||
# get pivot points with fixed length
|
||||
tot_seg = num_cp * 2 + 1
|
||||
seg_w = w / tot_seg # segment width
|
||||
pp = [None] * num_cp # init pivot points
|
||||
cp_section = [[0, 0]] * tot_seg
|
||||
seg_height = [0] * num_cp
|
||||
seg_num = 0
|
||||
num_sec = 0
|
||||
prev_h = -1
|
||||
for i in range(0,len(cp)):
|
||||
(x, sy, ey) = cp[i]
|
||||
if (seg_num + 1) * seg_w <= x and seg_num <= tot_seg:
|
||||
# average previous segment
|
||||
if num_sec == 0: break
|
||||
cp_section[seg_num] = [cp_section[seg_num][0] / num_sec, cp_section[seg_num][1] / num_sec]
|
||||
num_sec = 0
|
||||
|
||||
# reset variables
|
||||
seg_num += 1
|
||||
prev_h = -1
|
||||
|
||||
# accumulate center points
|
||||
cy = (sy + ey) * 0.5
|
||||
cur_h = ey - sy + 1
|
||||
cp_section[seg_num] = [cp_section[seg_num][0] + x, cp_section[seg_num][1] + cy]
|
||||
num_sec += 1
|
||||
|
||||
if seg_num % 2 == 0: continue # No polygon area
|
||||
|
||||
if prev_h < cur_h:
|
||||
pp[int((seg_num - 1)/2)] = (x, cy)
|
||||
seg_height[int((seg_num - 1)/2)] = cur_h
|
||||
prev_h = cur_h
|
||||
|
||||
# processing last segment
|
||||
if num_sec != 0:
|
||||
cp_section[-1] = [cp_section[-1][0] / num_sec, cp_section[-1][1] / num_sec]
|
||||
|
||||
# pass if num of pivots is not sufficient or segment width is smaller than character height
|
||||
if None in pp or seg_w < np.max(seg_height) * 0.25:
|
||||
polys.append(None); continue
|
||||
|
||||
# calc median maximum of pivot points
|
||||
half_char_h = np.median(seg_height) * expand_ratio / 2
|
||||
|
||||
# calc gradient and apply to make horizontal pivots
|
||||
new_pp = []
|
||||
for i, (x, cy) in enumerate(pp):
|
||||
dx = cp_section[i * 2 + 2][0] - cp_section[i * 2][0]
|
||||
dy = cp_section[i * 2 + 2][1] - cp_section[i * 2][1]
|
||||
if dx == 0: # gradient is zero
|
||||
new_pp.append([x, cy - half_char_h, x, cy + half_char_h])
|
||||
continue
|
||||
rad = - math.atan2(dy, dx)
|
||||
c, s = half_char_h * math.cos(rad), half_char_h * math.sin(rad)
|
||||
new_pp.append([x - s, cy - c, x + s, cy + c])
|
||||
|
||||
# get edge points to cover character heatmaps
|
||||
isSppFound, isEppFound = False, False
|
||||
grad_s = (pp[1][1] - pp[0][1]) / (pp[1][0] - pp[0][0]) + (pp[2][1] - pp[1][1]) / (pp[2][0] - pp[1][0])
|
||||
grad_e = (pp[-2][1] - pp[-1][1]) / (pp[-2][0] - pp[-1][0]) + (pp[-3][1] - pp[-2][1]) / (pp[-3][0] - pp[-2][0])
|
||||
for r in np.arange(0.5, max_r, step_r):
|
||||
dx = 2 * half_char_h * r
|
||||
if not isSppFound:
|
||||
line_img = np.zeros(word_label.shape, dtype=np.uint8)
|
||||
dy = grad_s * dx
|
||||
p = np.array(new_pp[0]) - np.array([dx, dy, dx, dy])
|
||||
cv2.line(line_img, (int(p[0]), int(p[1])), (int(p[2]), int(p[3])), 1, thickness=1)
|
||||
if np.sum(np.logical_and(word_label, line_img)) == 0 or r + 2 * step_r >= max_r:
|
||||
spp = p
|
||||
isSppFound = True
|
||||
if not isEppFound:
|
||||
line_img = np.zeros(word_label.shape, dtype=np.uint8)
|
||||
dy = grad_e * dx
|
||||
p = np.array(new_pp[-1]) + np.array([dx, dy, dx, dy])
|
||||
cv2.line(line_img, (int(p[0]), int(p[1])), (int(p[2]), int(p[3])), 1, thickness=1)
|
||||
if np.sum(np.logical_and(word_label, line_img)) == 0 or r + 2 * step_r >= max_r:
|
||||
epp = p
|
||||
isEppFound = True
|
||||
if isSppFound and isEppFound:
|
||||
break
|
||||
|
||||
# pass if boundary of polygon is not found
|
||||
if not (isSppFound and isEppFound):
|
||||
polys.append(None); continue
|
||||
|
||||
# make final polygon
|
||||
poly = []
|
||||
poly.append(warpCoord(Minv, (spp[0], spp[1])))
|
||||
for p in new_pp:
|
||||
poly.append(warpCoord(Minv, (p[0], p[1])))
|
||||
poly.append(warpCoord(Minv, (epp[0], epp[1])))
|
||||
poly.append(warpCoord(Minv, (epp[2], epp[3])))
|
||||
for p in reversed(new_pp):
|
||||
poly.append(warpCoord(Minv, (p[2], p[3])))
|
||||
poly.append(warpCoord(Minv, (spp[2], spp[3])))
|
||||
|
||||
# add to final result
|
||||
polys.append(np.array(poly))
|
||||
|
||||
return polys
|
||||
|
||||
def getDetBoxes(textmap, linkmap, text_threshold, link_threshold, low_text, poly=False, estimate_num_chars=False):
|
||||
if poly and estimate_num_chars:
|
||||
raise Exception("Estimating the number of characters not currently supported with poly.")
|
||||
boxes, labels, mapper = getDetBoxes_core(textmap, linkmap, text_threshold, link_threshold, low_text, estimate_num_chars)
|
||||
|
||||
if poly:
|
||||
polys = getPoly_core(boxes, labels, mapper, linkmap)
|
||||
else:
|
||||
polys = [None] * len(boxes)
|
||||
|
||||
return boxes, polys, mapper
|
||||
|
||||
def adjustResultCoordinates(polys, ratio_w, ratio_h, ratio_net = 2):
|
||||
if len(polys) > 0:
|
||||
polys = np.array(polys)
|
||||
for k in range(len(polys)):
|
||||
if polys[k] is not None:
|
||||
polys[k] *= (ratio_w * ratio_net, ratio_h * ratio_net)
|
||||
return polys
|
||||
@@ -40,7 +40,7 @@ def cosine_similarity(vector_a, vector_b):
|
||||
|
||||
predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "Recognize")
|
||||
|
||||
class RecognizeDetection(PredictPlugin):
|
||||
class FaceRecognizeDetection(PredictPlugin):
|
||||
def __init__(self, nativeId: str | None = None):
|
||||
super().__init__(nativeId=nativeId)
|
||||
|
||||
@@ -154,6 +154,10 @@ class RecognizeDetection(PredictPlugin):
|
||||
ret = await super().run_detection_image(image, detection_session)
|
||||
|
||||
detections = ret["detections"]
|
||||
|
||||
# filter any non face detections because this is using an old model that includes plates and text
|
||||
detections = [d for d in detections if d["className"] == "face"]
|
||||
|
||||
# non max suppression on detections
|
||||
for i in range(len(detections)):
|
||||
d1 = detections[i]
|
||||
@@ -202,8 +206,10 @@ class RecognizeDetection(PredictPlugin):
|
||||
for d in ret["detections"]:
|
||||
if d["className"] == "face":
|
||||
futures.append(asyncio.ensure_future(self.setEmbedding(d, image)))
|
||||
elif d["className"] == "plate":
|
||||
futures.append(asyncio.ensure_future(self.setLabel(d, image)))
|
||||
# elif d["className"] == "plate":
|
||||
# futures.append(asyncio.ensure_future(self.setLabel(d, image)))
|
||||
# elif d['className'] == 'text':
|
||||
# futures.append(asyncio.ensure_future(self.setLabel(d, image)))
|
||||
|
||||
if len(futures):
|
||||
await asyncio.wait(futures)
|
||||
179
plugins/tensorflow-lite/src/predict/text_recognize.py
Normal file
@@ -0,0 +1,179 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import concurrent.futures
|
||||
import traceback
|
||||
from asyncio import Future
|
||||
from typing import Any, List, Tuple
|
||||
|
||||
import numpy as np
|
||||
import scrypted_sdk
|
||||
from PIL import Image
|
||||
from scrypted_sdk import ObjectDetectionResult, ObjectDetectionSession, ObjectsDetected
|
||||
|
||||
from common.text import prepare_text_result, process_text_result
|
||||
from predict import Prediction, PredictPlugin
|
||||
from predict.craft_utils import normalizeMeanVariance
|
||||
from predict.rectangle import Rectangle
|
||||
from predict.text_skew import find_adjacent_groups
|
||||
|
||||
from .craft_utils import adjustResultCoordinates, getDetBoxes
|
||||
|
||||
predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "TextDetect")
|
||||
|
||||
|
||||
class TextRecognition(PredictPlugin):
|
||||
def __init__(self, nativeId: str | None = None):
|
||||
super().__init__(nativeId=nativeId)
|
||||
|
||||
self.inputheight = 640
|
||||
self.inputwidth = 640
|
||||
|
||||
self.labels = {
|
||||
0: "text",
|
||||
}
|
||||
self.loop = asyncio.get_event_loop()
|
||||
self.minThreshold = 0.1
|
||||
|
||||
self.detectModel = self.downloadModel("craft")
|
||||
self.textModel = self.downloadModel("vgg_english_g2")
|
||||
|
||||
def downloadModel(self, model: str):
|
||||
pass
|
||||
|
||||
def predictDetectModel(self, input):
|
||||
pass
|
||||
|
||||
def predictTextModel(self, input):
|
||||
pass
|
||||
|
||||
async def detect_once(
|
||||
self, input: Image.Image, settings: Any, src_size, cvss
|
||||
) -> scrypted_sdk.ObjectsDetected:
|
||||
image_tensor = normalizeMeanVariance(np.array(input))
|
||||
# reshape to c w h
|
||||
image_tensor = image_tensor.transpose([2, 0, 1])
|
||||
# add extra dimension to tensor
|
||||
image_tensor = np.expand_dims(image_tensor, axis=0)
|
||||
|
||||
y = await asyncio.get_event_loop().run_in_executor(
|
||||
predictExecutor, lambda: self.predictDetectModel(image_tensor)
|
||||
)
|
||||
|
||||
estimate_num_chars = False
|
||||
ratio_h = ratio_w = 1
|
||||
text_threshold = 0.7
|
||||
link_threshold = 0.9
|
||||
low_text = 0.4
|
||||
poly = False
|
||||
|
||||
boxes_list, polys_list = [], []
|
||||
for out in y:
|
||||
# make score and link map
|
||||
score_text = out[:, :, 0]
|
||||
score_link = out[:, :, 1]
|
||||
|
||||
# Post-processing
|
||||
boxes, polys, mapper = getDetBoxes(
|
||||
score_text,
|
||||
score_link,
|
||||
text_threshold,
|
||||
link_threshold,
|
||||
low_text,
|
||||
poly,
|
||||
estimate_num_chars,
|
||||
)
|
||||
if not len(boxes):
|
||||
continue
|
||||
|
||||
# coordinate adjustment
|
||||
boxes = adjustResultCoordinates(boxes, ratio_w, ratio_h)
|
||||
polys = adjustResultCoordinates(polys, ratio_w, ratio_h)
|
||||
if estimate_num_chars:
|
||||
boxes = list(boxes)
|
||||
polys = list(polys)
|
||||
for k in range(len(polys)):
|
||||
if estimate_num_chars:
|
||||
boxes[k] = (boxes[k], mapper[k])
|
||||
if polys[k] is None:
|
||||
polys[k] = boxes[k]
|
||||
boxes_list.append(boxes)
|
||||
polys_list.append(polys)
|
||||
|
||||
preds: List[Prediction] = []
|
||||
for boxes in boxes_list:
|
||||
for box in boxes:
|
||||
tl, tr, br, bl = box
|
||||
l = min(tl[0], bl[0])
|
||||
t = min(tl[1], tr[1])
|
||||
r = max(tr[0], br[0])
|
||||
b = max(bl[1], br[1])
|
||||
|
||||
pred = Prediction(0, 1, Rectangle(l, t, r, b))
|
||||
preds.append(pred)
|
||||
|
||||
return self.create_detection_result(preds, src_size, cvss)
|
||||
|
||||
async def run_detection_image(
|
||||
self, image: scrypted_sdk.Image, detection_session: ObjectDetectionSession
|
||||
) -> ObjectsDetected:
|
||||
ret = await super().run_detection_image(image, detection_session)
|
||||
|
||||
detections = ret["detections"]
|
||||
|
||||
futures: List[Future] = []
|
||||
|
||||
boundingBoxes = [d["boundingBox"] for d in detections]
|
||||
if not len(boundingBoxes):
|
||||
return ret
|
||||
|
||||
text_groups = find_adjacent_groups(boundingBoxes)
|
||||
|
||||
detections = []
|
||||
for group in text_groups:
|
||||
boundingBox = group["union"]
|
||||
d: ObjectDetectionResult = {
|
||||
"boundingBox": boundingBox,
|
||||
"score": 1,
|
||||
"className": "text",
|
||||
}
|
||||
futures.append(
|
||||
asyncio.ensure_future(self.setLabel(d, image, group["skew_angle"]))
|
||||
)
|
||||
detections.append(d)
|
||||
|
||||
ret["detections"] = detections
|
||||
|
||||
if len(futures):
|
||||
await asyncio.wait(futures)
|
||||
|
||||
# filter empty labels
|
||||
ret["detections"] = [d for d in detections if d.get("label")]
|
||||
|
||||
return ret
|
||||
|
||||
async def setLabel(
|
||||
self, d: ObjectDetectionResult, image: scrypted_sdk.Image, skew_angle: float
|
||||
):
|
||||
try:
|
||||
|
||||
image_tensor = await prepare_text_result(d, image, skew_angle)
|
||||
preds = await asyncio.get_event_loop().run_in_executor(
|
||||
predictExecutor,
|
||||
lambda: self.predictTextModel(image_tensor),
|
||||
)
|
||||
d["label"] = process_text_result(preds)
|
||||
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
pass
|
||||
|
||||
# width, height, channels
|
||||
def get_input_details(self) -> Tuple[int, int, int]:
|
||||
return (self.inputwidth, self.inputheight, 3)
|
||||
|
||||
def get_input_size(self) -> Tuple[float, float]:
|
||||
return (self.inputwidth, self.inputheight)
|
||||
|
||||
def get_input_format(self) -> str:
|
||||
return "rgb"
|
||||
84
plugins/tensorflow-lite/src/predict/text_skew.py
Normal file
@@ -0,0 +1,84 @@
from typing import List, Tuple
import math

BoundingBox = Tuple[int, int, int, int]


def union_boxes(boxes: List[BoundingBox]) -> BoundingBox:
    left = min([box[0] for box in boxes])
    top = min([box[1] for box in boxes])
    right = max([box[0] + box[2] for box in boxes])
    bottom = max([box[1] + box[3] for box in boxes])
    return left, top, right - left, bottom - top


def are_boxes_adjacent(box1: BoundingBox, box2: BoundingBox):
    l1, t1, w1, h1 = box1
    l2, t2, w2, h2 = box2

    # boxes on clearly different lines are never adjacent
    line_slop = 2 / 3
    if t1 > t2 + h2 * line_slop or t2 > t1 + h1 * line_slop:
        return False

    # Calculate the left and right edges of each box
    left_edge_box1 = l1
    right_edge_box1 = l1 + w1
    left_edge_box2 = l2
    right_edge_box2 = l2 + w2

    # Determine the larger height between the two boxes
    larger_height = max(h1, h2)

    threshold = larger_height * 2

    # Calculate the horizontal distance between the boxes
    distance = min(
        abs(left_edge_box1 - right_edge_box2), abs(left_edge_box2 - right_edge_box1)
    )

    # Check if the boxes are adjacent along their left or right sides
    return distance <= threshold


def find_adjacent_groups(boxes: List[BoundingBox]) -> List[dict]:
    groups = []

    # sort boxes left to right
    boxes = sorted(boxes, key=lambda box: box[0])

    for box in boxes:
        added_to_group = False
        for group in groups:
            for other_box in group["boxes"]:
                if are_boxes_adjacent(box, other_box):
                    group["boxes"].append(box)
                    added_to_group = True
                    break
            if added_to_group:
                break
        if not added_to_group:
            groups.append({"boxes": [box], "skew_angle": 0})

    # Calculate the skew angle of each group
    for group in groups:
        boxes = group["boxes"]
        group["union"] = union_boxes(boxes)
        if len(boxes) > 1:
            lm = (boxes[0][1] + boxes[0][3]) / 2
            rm = (boxes[-1][1] + boxes[-1][3]) / 2
            minx = min([box[0] for box in boxes])
            maxx = max([box[0] + box[2] for box in boxes])
            maxh = max([box[3] for box in boxes])
            pad_height = maxh * 0.05
            dx = maxx - minx
            group["skew_angle"] = math.atan2(rm - lm, dx) * 2
            # pad this box by a few pixels
            group["union"] = (
                group["union"][0] - pad_height,
                group["union"][1] - pad_height,
                group["union"][2] + pad_height * 2,
                group["union"][3] + pad_height * 2,
            )
        else:
            group["skew_angle"] = 0

    return groups
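For reference, a minimal usage sketch of these helpers. The word boxes below are made-up (left, top, width, height) values, and the import assumes text_skew.py is on the module path:

from text_skew import find_adjacent_groups  # assumes text_skew.py is importable

# hypothetical word boxes from a single sign, drifting slightly downward left to right
words = [
    (10, 100, 60, 20),   # "SPEED"
    (80, 104, 60, 20),   # "LIMIT"
    (150, 108, 30, 20),  # "25"
]

groups = find_adjacent_groups(words)
for group in groups:
    # each group carries the member boxes, their padded union box,
    # and an estimated skew angle in radians
    print(group["union"], group["skew_angle"])

With these values the three boxes land in one group, whose union box spans the whole line and whose skew angle is a small positive value reflecting the downward drift.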
@@ -2,6 +2,7 @@
 pycoral~=2.0
 tflite-runtime==2.5.0.post1
 
-# pillow for anything not intel linux, pillow-simd is available on x64 linux
-Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
-pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
+# pillow-simd is available on x64 linux
+# pillow-simd confirmed not building with arm64 linux or apple silicon
+Pillow>=5.4.1; 'linux' not in sys_platform or platform_machine != 'x86_64'
+pillow-simd; 'linux' in sys_platform and platform_machine == 'x86_64'
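The choice between Pillow and pillow-simd is made entirely by the PEP 508 environment markers on those requirement lines. As a rough illustration (not part of the plugin), the markers can be evaluated with the packaging library; the environment dicts below are assumed example platforms:

from packaging.markers import Marker

# the marker attached to pillow-simd in the updated requirements
simd_marker = Marker("'linux' in sys_platform and platform_machine == 'x86_64'")

# x64 Linux selects pillow-simd...
print(simd_marker.evaluate({"sys_platform": "linux", "platform_machine": "x86_64"}))   # True
# ...while arm64 Linux and Apple Silicon fall back to regular Pillow
print(simd_marker.evaluate({"sys_platform": "linux", "platform_machine": "aarch64"}))  # False
print(simd_marker.evaluate({"sys_platform": "darwin", "platform_machine": "arm64"}))   # False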
20 plugins/tensorflow/.vscode/tasks.json vendored
@@ -1,20 +0,0 @@
{
    // See https://go.microsoft.com/fwlink/?LinkId=733558
    // for the documentation about the tasks.json format
    "version": "2.0.0",
    "tasks": [
        {
            "label": "scrypted: deploy+debug",
            "type": "shell",
            "presentation": {
                "echo": true,
                "reveal": "silent",
                "focus": false,
                "panel": "shared",
                "showReuseMessage": true,
                "clear": false
            },
            "command": "npm run scrypted-vscode-launch ${config:scrypted.debugHost}",
        },
    ]
}
@@ -1,10 +0,0 @@
# TensorFlow Object Detection for Scrypted

This plugin adds object detection capabilities to any camera in Scrypted. Having a fast GPU and CPU is highly recommended.

The TensorFlow Plugin should only be used if you are a Scrypted NVR user. It will provide no benefit to HomeKit, which does its own detection processing.

## Platform Support

* Edge TPU (Coral.ai) hardware acceleration is NOT supported by this plugin; install TensorFlow-Lite instead.
* Mac users should install the CoreML Plugin for hardware acceleration.
@@ -1,21 +0,0 @@
#!/bin/sh
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

rm -rf all_models
mkdir -p all_models
cd all_models
wget --content-disposition https://tfhub.dev/tensorflow/ssd_mobilenet_v2/fpnlite_320x320/1?tf-hub-format=compressed
wget https://raw.githubusercontent.com/koush/coreml-survival-guide/master/MobileNetV2%2BSSDLite/coco_labels.txt
tar xzvf ssd_mobilenet_v2_fpnlite_320x320_1.tar.gz
@@ -1 +0,0 @@
../all_models/coco_labels.txt
@@ -1 +0,0 @@
../all_models/saved_model.pb
@@ -1 +0,0 @@
../all_models/variables
@@ -1 +0,0 @@
../../tensorflow-lite/src/detect
@@ -1,4 +0,0 @@
from tf import TensorFlowPlugin

def create_scrypted_plugin():
    return TensorFlowPlugin()
@@ -1 +0,0 @@
../../tensorflow-lite/src/predict
@@ -1,8 +0,0 @@
tensorflow-macos; sys_platform == 'darwin'
tensorflow; sys_platform != 'darwin'

numpy>=1.16.2

# pillow for anything not intel linux, pillow-simd is available on x64 linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
Some files were not shown because too many files have changed in this diff.