Compare commits

...

126 Commits

Author SHA1 Message Date
Koushik Dutta
b3276304d2 postrelease 2024-04-19 20:33:11 -07:00
Koushik Dutta
fcf87cc559 postbeta 2024-04-19 20:06:19 -07:00
Koushik Dutta
12c1d02a5b server: fix auto restart bug lol 2024-04-19 20:06:08 -07:00
Koushik Dutta
216504639b postbeta 2024-04-19 19:42:28 -07:00
Koushik Dutta
6eae1c7de3 server: plugin reload/deletion race. 2024-04-19 19:42:13 -07:00
Koushik Dutta
a5a1959bd0 predict: fix blank detections in dropdown 2024-04-19 11:48:44 -07:00
Koushik Dutta
62e23880fd coreml: handle batching hint failures 2024-04-19 10:33:37 -07:00
Koushik Dutta
9e652c3521 videoanalysis: fix model hints 2024-04-19 09:31:27 -07:00
Koushik Dutta
97004577f3 videoanalysis: id hint 2024-04-19 09:06:53 -07:00
Koushik Dutta
6f3eac4e43 predict: add yolov6s 2024-04-19 08:41:27 -07:00
Koushik Dutta
c435e351c7 server: fix potential race condition around plugin restart requests 2024-04-19 07:46:39 -07:00
Koushik Dutta
ffc9ca14b5 predict: fix double periodic restarts 2024-04-19 07:37:40 -07:00
Koushik Dutta
9b349fdadc Merge branch 'main' of github.com:koush/scrypted 2024-04-18 18:39:50 -07:00
Koushik Dutta
7cf0c427f9 detect: yolov6 2024-04-18 18:39:45 -07:00
Techno Tim
2fb4fbab15 fix(iface logic): Added net to the helper functions to also detect ifaces that start with 'net' (#1437) 2024-04-18 15:35:50 -07:00
Koushik Dutta
4a50095049 Update build-plugins-changed.yml 2024-04-18 09:58:33 -07:00
Koushik Dutta
2510fafcf7 videoanalysis: notes on character casing todo. 2024-04-18 09:56:38 -07:00
Koushik Dutta
47897da6fb videoanalysis: improve similar character checker 2024-04-18 08:41:49 -07:00
Koushik Dutta
94055d032b unifi-protect: document license plate api 2024-04-17 22:36:28 -07:00
Koushik Dutta
3e7535cc42 unifi-protect: add lpr 2024-04-17 22:35:49 -07:00
Koushik Dutta
8d47e9c473 coreml/core: batching support 2024-04-17 11:27:57 -07:00
Koushik Dutta
3897e78bdc coreml/sdk: batching support 2024-04-17 10:59:54 -07:00
Koushik Dutta
2fbc0c2573 videoanalysis: fix settings order 2024-04-17 08:31:23 -07:00
Koushik Dutta
1c8fd2399d videoanalysis: add support for lpr on smart motion sensor 2024-04-17 08:26:53 -07:00
Koushik Dutta
3abb6472a7 postbeta 2024-04-16 23:24:24 -07:00
Koushik Dutta
6a221eee98 videoanalysis: wip license plate smart sensor 2024-04-16 23:24:12 -07:00
Koushik Dutta
ad9e9f2d1d predict: publish betas 2024-04-16 23:24:00 -07:00
Koushik Dutta
8c6afde1fc common: reduce logging 2024-04-16 23:23:40 -07:00
Koushik Dutta
b7a8f97198 server: fix potential race conditions around plugin restart 2024-04-16 23:23:28 -07:00
Koushik Dutta
f5fabfeedf homekit: fix hidden setting 2024-04-16 15:50:43 -07:00
Koushik Dutta
494f881d05 openvino: change default model 2024-04-16 15:49:47 -07:00
Koushik Dutta
7192c5ddc2 openvino: fix potential thread safety.
coreml/openvino: more recognition wip
2024-04-16 15:48:40 -07:00
Koushik Dutta
b4da52eaa2 snapshot: improve caching 2024-04-16 10:26:40 -07:00
Koushik Dutta
584ea97b08 mqtt: Fix autodiscovery 2024-04-16 10:26:25 -07:00
Koushik Dutta
807ba81d92 homekit: publish 2024-04-15 11:07:51 -07:00
Koushik Dutta
0e35bac42a homekit: addIdentifyingMaterial false 2024-04-15 10:54:54 -07:00
Koushik Dutta
a5a464e000 homekit: undo revert for later publish 2024-04-15 10:53:58 -07:00
Koushik Dutta
a3a878cbd5 homekit: revert for longer staged rollout 2024-04-15 10:52:19 -07:00
Koushik Dutta
8abdab70e9 coreml/openvino: publish 2024-04-15 10:35:10 -07:00
Koushik Dutta
69fd86a684 homekit: handle mp4 generation shutdown 2024-04-15 07:52:25 -07:00
Koushik Dutta
f0e85f14a9 Merge branch 'main' of github.com:koush/scrypted 2024-04-15 07:22:45 -07:00
Koushik Dutta
6130b7fa0c homekit: add identifying material to prevent name clashing 2024-04-15 07:22:41 -07:00
slyoldfox
6dba80c277 Fixes handling the welcome message status from bticino (#1432)
* Fix voicemail status by parsing the VSC more accurately

* Implement video clip streaming by fetching and converting the voicemail message
2024-04-14 09:35:37 -07:00
Koushik Dutta
0f4ff0d4fc sdk: add missing dom types 2024-04-13 20:05:24 -07:00
Koushik Dutta
3d58600c5f postbeta 2024-04-13 12:53:34 -07:00
Koushik Dutta
9c9909e05b amcrest: Fix probe 2024-04-13 12:28:39 -07:00
Koushik Dutta
9c0d253cae webrtc: fix audio handling on unrecognized codec 2024-04-13 09:43:34 -07:00
Koushik Dutta
c1c9fec62f openvino: working lpr and face recog; 2024-04-12 22:12:07 -07:00
Koushik Dutta
27a1c5269a coreml: fixup detection test 2024-04-12 22:04:55 -07:00
Koushik Dutta
c0c938d9c4 snapshot: fix image resize defaults 2024-04-12 21:44:26 -07:00
Koushik Dutta
1dae1834ad predict: missing files 2024-04-12 20:45:31 -07:00
Koushik Dutta
250b2554d7 coreml: dead code 2024-04-12 18:43:11 -07:00
Koushik Dutta
35de80e94a openvino/coreml: refactor 2024-04-12 18:41:55 -07:00
Koushik Dutta
ba2bf5692f openvino: update 2024-04-12 12:34:57 -07:00
Koushik Dutta
4684ea6592 coreml: recognition fixes 2024-04-12 12:22:37 -07:00
Koushik Dutta
2ab74bc0f8 core: add label support 2024-04-12 08:42:07 -07:00
Koushik Dutta
0a888364b2 coreml: working lpr 2024-04-11 23:53:30 -07:00
Koushik Dutta
c6ea727a0c mqtt: fix switch 2024-04-11 13:14:06 -07:00
Koushik Dutta
96a0a6bd90 snapshots/fetch: fix request teardown? 2024-04-11 12:54:10 -07:00
Koushik Dutta
bf783c7c3c mqtt: switch auto discovery 2024-04-11 10:07:04 -07:00
Koushik Dutta
cbd11908af homekit: fix aac transcoding for silent audio 2024-04-10 11:36:45 -07:00
Koushik Dutta
3367856715 webrtc: add temporary stun fallback 2024-04-10 10:35:45 -07:00
Koushik Dutta
16d38906fe postbeta 2024-04-09 22:19:09 -07:00
Koushik Dutta
fb37f9f58d coreml: remove yolo9c 2024-04-08 15:07:29 -07:00
Koushik Dutta
7514ccf804 detect: remove old models 2024-04-08 14:57:47 -07:00
Koushik Dutta
267a53e84b ha: publish 2024-04-08 11:33:06 -07:00
Koushik Dutta
10a7877522 tensorflow-lite: fix prediction crash 2024-04-08 09:29:52 -07:00
Koushik Dutta
f15526f78d openvino: switch to scrypted_yolov9c_320 2024-04-07 23:20:31 -07:00
Koushik Dutta
524f9122b7 server: update deps 2024-04-07 22:37:13 -07:00
Koushik Dutta
c35142a112 openvino/coreml: new models 2024-04-07 22:37:04 -07:00
Koushik Dutta
ae63e6004e amcrest: add motion pulse 2024-04-07 19:20:24 -07:00
Koushik Dutta
ab90e2ec02 amcrest: add face detection type 2024-04-07 13:30:07 -07:00
Koushik Dutta
96d536f4b2 tensorflow-lite: change default model for usb 2024-04-07 11:55:31 -07:00
Koushik Dutta
c678b31f6f core/sdk: additional scripting improvements 2024-04-07 10:28:31 -07:00
Koushik Dutta
0315466b0a core: add storage settings to scripts 2024-04-07 10:19:09 -07:00
Koushik Dutta
0db3b7df5a homekit: datamigration for addIdentifyingMaterial 2024-04-06 20:46:58 -07:00
Koushik Dutta
00d8054de8 homekit: add identifying material to prevent mdns collision 2024-04-06 20:30:11 -07:00
Koushik Dutta
3907547c6f webrtc: fix potential crashes 2024-04-06 12:01:55 -07:00
Koushik Dutta
bd3bc0dcb3 coreml: handle empty face set error 2024-04-06 10:52:44 -07:00
Koushik Dutta
b36783df0a coreml: improve face recognition concurrency 2024-04-05 12:32:29 -07:00
Koushik Dutta
b676c27316 docker: initial nvidia support 2024-04-04 12:48:50 -07:00
Koushik Dutta
bcea7b869b coreml: fix threading 2024-04-04 12:48:11 -07:00
Koushik Dutta
2dd549c042 alexa: fix syncedDevices being undefined 2024-04-04 11:39:43 -07:00
Koushik Dutta
c06e3623b6 amcrest: additional dahua hackery 2024-04-03 10:40:03 -07:00
Koushik Dutta
008e0ecbf7 amcrest: Fix buggy http firmware on some dahua 2024-04-03 09:48:10 -07:00
Koushik Dutta
e6cb41168f snapshot: better error reporting 2024-04-03 08:57:20 -07:00
Koushik Dutta
95ac72c5c8 coreml: encode embedding 2024-04-02 22:07:13 -07:00
Koushik Dutta
faa667f622 sdk: add embedding field 2024-04-02 21:04:09 -07:00
Koushik Dutta
32868c69fe coreml: working face recog 2024-04-02 20:31:40 -07:00
Koushik Dutta
207cb9d833 homekit: clean up late generator bug 2024-04-02 20:31:30 -07:00
Koushik Dutta
f2de58f59a homekit: fix annexb detection 2024-04-02 15:25:32 -07:00
Koushik Dutta
484682257b coreml: wip face recog 2024-04-02 15:08:54 -07:00
Koushik Dutta
b0b922d209 coreml: plug in inception v1 reset face recognition 2024-04-01 21:36:19 -07:00
Koushik Dutta
e37295fb20 coreml: move vision framework into coreml 2024-04-01 20:45:31 -07:00
Koushik Dutta
2e72366d41 vision-framework: initial release 2024-04-01 10:20:49 -07:00
Koushik Dutta
97b09442e8 tensorflow-lite: fix windows 2024-03-31 16:28:34 -07:00
Koushik Dutta
c2defb8c08 onvif: fix two way audio buffer mtu overflow 2024-03-31 13:24:49 -07:00
Koushik Dutta
aa255530aa postrelease 2024-03-30 20:13:03 -07:00
Koushik Dutta
0b26f4df39 snapshot: avoid internal api 2024-03-30 20:06:02 -07:00
Koushik Dutta
be98083557 webrtc: fix ffmpeg leaks? 2024-03-29 23:32:15 -07:00
Koushik Dutta
f4dcb8e662 openvino: publish new models 2024-03-29 23:16:23 -07:00
Koushik Dutta
45186316a6 tensorflow-lite: publish new models 2024-03-29 23:03:12 -07:00
Koushik Dutta
c6e6c881fe coreml: update models. publish. 2024-03-29 22:52:44 -07:00
Koushik Dutta
62b07ea609 core: fix node upgrade 2024-03-29 13:08:41 -07:00
Koushik Dutta
a00ae60ab0 ha/proxmox: bump versions 2024-03-29 12:53:44 -07:00
Brett Jia
878753a526 server: treat self.device as future (#1401)
* server: treat self.device as future

* simplify

* modify annotation

* modify annotation
2024-03-28 19:36:14 -07:00
Koushik Dutta
3c1801ad01 cameras: fix signal + timeout combined usage 2024-03-27 22:05:05 -07:00
Koushik Dutta
30f9e358b7 cameras: fix fetch timeout bugs 2024-03-27 22:02:26 -07:00
Koushik Dutta
456faea1fd amcrest: fix two way audio termination 2024-03-27 15:39:42 -07:00
Koushik Dutta
5e58b1426e amcrest: fix two way audio termination 2024-03-27 15:39:21 -07:00
Koushik Dutta
ec6d617c09 cameras: update onvif two way 2024-03-27 12:21:41 -07:00
Koushik Dutta
1238abedb1 hikvision: fix events crossing streams in camera channels 2024-03-27 10:02:33 -07:00
Koushik Dutta
3e18b9e6aa amcrest: fix vehicle detection class 2024-03-26 19:29:58 -07:00
Koushik Dutta
dce76b5d87 amcrest: fix object detector types 2024-03-26 18:13:16 -07:00
Koushik Dutta
de645dfacb hikvision: add vehicle support 2024-03-26 14:08:34 -07:00
Koushik Dutta
6fd66db896 amcrest/hikvision: add support for smart detections. publish. 2024-03-26 10:41:32 -07:00
Koushik Dutta
62850163d7 hikvision: implement smart events 2024-03-25 23:22:02 -07:00
Koushik Dutta
b46a385a81 onvif: increase 2 way audio buffer to reduce stutter. 2024-03-25 13:28:29 -07:00
Koushik Dutta
c94fb231c6 cli: fix updater 2024-03-25 12:45:10 -07:00
Koushik Dutta
a3df934a88 chromecast: fix audio playback 2024-03-25 12:44:25 -07:00
Koushik Dutta
a6143e103e postrelease 2024-03-25 12:09:11 -07:00
Koushik Dutta
df705cb0e7 server: rollback portable python to 3.10 2024-03-25 12:09:00 -07:00
Koushik Dutta
6e7f291f81 hikvision: fix two way audio duration 2024-03-25 11:02:58 -07:00
Koushik Dutta
fa5b9f66db python-codecs: Fix process exit leak 2024-03-23 17:45:38 -07:00
Koushik Dutta
f760840a6d ha: publish 2024-03-23 12:48:30 -07:00
Koushik Dutta
f36ee6ccb5 postrelease
postrelease

postrelease
2024-03-23 12:34:40 -07:00
122 changed files with 3175 additions and 1001 deletions

View File

@@ -1,11 +1,11 @@
name: Build changed plugins
on:
push:
branches: ["main"]
paths: ["plugins/**"]
pull_request:
paths: ["plugins/**"]
# push:
# branches: ["main"]
# paths: ["plugins/**"]
# pull_request:
# paths: ["plugins/**"]
workflow_dispatch:
jobs:

common/fs/@types/sdk/settings-mixin.d.ts vendored Symbolic link
View File

@@ -0,0 +1 @@
../../../../sdk/dist/src/settings-mixin.d.ts

View File

@@ -0,0 +1 @@
../../../../sdk/dist/src/storage-settings.d.ts

View File

@@ -1,9 +1,10 @@
import sdk, { MixinDeviceBase, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, ScryptedInterfaceDescriptors } from "@scrypted/sdk";
import sdk, { MixinDeviceBase, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, ScryptedInterfaceDescriptors, ScryptedMimeTypes } from "@scrypted/sdk";
import { StorageSettings } from "@scrypted/sdk/storage-settings";
import { SettingsMixinDeviceBase } from "@scrypted/sdk/settings-mixin";
import fs from 'fs';
import type { TranspileOptions } from "typescript";
import vm from "vm";
import { ScriptDevice } from "./monaco/script-device";
import path from 'path';
const { systemManager, deviceManager, mediaManager, endpointManager } = sdk;
@@ -28,9 +29,13 @@ export function readFileAsString(f: string) {
}
function getTypeDefs() {
const settingsMixinDefs = readFileAsString('@types/sdk/settings-mixin.d.ts');
const storageSettingsDefs = readFileAsString('@types/sdk/storage-settings.d.ts');
const scryptedTypesDefs = readFileAsString('@types/sdk/types.d.ts');
const scryptedIndexDefs = readFileAsString('@types/sdk/index.d.ts');
return {
settingsMixinDefs,
storageSettingsDefs,
scryptedIndexDefs,
scryptedTypesDefs,
};
@@ -64,6 +69,7 @@ export async function scryptedEval(device: ScryptedDeviceBase, script: string, e
fs: require('realfs'),
ScryptedDeviceBase,
MixinDeviceBase,
StorageSettings,
systemManager,
deviceManager,
endpointManager,
@@ -73,6 +79,8 @@ export async function scryptedEval(device: ScryptedDeviceBase, script: string, e
localStorage: device.storage,
device,
exports: {} as any,
SettingsMixinDeviceBase,
ScryptedMimeTypes,
ScryptedInterface,
ScryptedDeviceType,
// @ts-expect-error
@@ -173,6 +181,16 @@ export function createMonacoEvalDefaults(extraLibs: { [lib: string]: string }) {
"node_modules/@types/scrypted__sdk/types/index.d.ts"
);
monaco.languages.typescript.typescriptDefaults.addExtraLib(
libs['settingsMixin'],
"node_modules/@types/scrypted__sdk/settings-mixin.d.ts"
);
monaco.languages.typescript.typescriptDefaults.addExtraLib(
libs['storageSettings'],
"node_modules/@types/scrypted__sdk/storage-settings.d.ts"
);
monaco.languages.typescript.typescriptDefaults.addExtraLib(
libs['sdk'],
"node_modules/@types/scrypted__sdk/index.d.ts"

View File

@@ -136,12 +136,17 @@ export async function readLine(readable: Readable) {
}
export async function readString(readable: Readable | Promise<Readable>) {
let data = '';
const buffer = await readBuffer(readable);
return buffer.toString();
}
export async function readBuffer(readable: Readable | Promise<Readable>) {
const buffers: Buffer[] = [];
readable = await readable;
readable.on('data', buffer => {
data += buffer.toString();
buffers.push(buffer);
});
readable.resume();
await once(readable, 'end')
return data;
return Buffer.concat(buffers);
}
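
For clarity, the hunk above replaces per-chunk string concatenation with buffer accumulation, so multi-byte characters split across chunk boundaries are no longer corrupted by premature decoding. A consolidated sketch of the resulting helpers (assuming Node's once from events, which the surrounding file already uses):

import { once } from 'events';
import { Readable } from 'stream';

// Accumulate raw chunks and decode only once, after concatenation.
export async function readBuffer(readable: Readable | Promise<Readable>) {
    const buffers: Buffer[] = [];
    readable = await readable;
    readable.on('data', (buffer: Buffer) => {
        buffers.push(buffer);
    });
    readable.resume();
    await once(readable, 'end');
    return Buffer.concat(buffers);
}

// readString is now a thin wrapper over readBuffer.
export async function readString(readable: Readable | Promise<Readable>) {
    const buffer = await readBuffer(readable);
    return buffer.toString();
}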

View File

@@ -1,6 +1,7 @@
{
"compilerOptions": {
"module": "commonjs",
"module": "Node16",
"moduleResolution": "Node16",
"target": "esnext",
"noImplicitAny": true,
"outDir": "./dist",

View File

@@ -1,6 +1,6 @@
# Home Assistant Addon Configuration
name: Scrypted
version: "18-jammy-full.s6-v0.93.0"
version: "20-jammy-full.s6-v0.97.0"
slug: scrypted
description: Scrypted is a high performance home video integration and automation platform
url: "https://github.com/koush/scrypted"

View File

@@ -7,7 +7,8 @@
# install script.
################################################################
ARG BASE="jammy"
FROM ubuntu:${BASE} as header
ARG REPO="ubuntu"
FROM ${REPO}:${BASE} as header
ENV DEBIAN_FRONTEND=noninteractive

View File

@@ -4,11 +4,11 @@ WORKDIR /
# Install miniconda
ENV CONDA_DIR /opt/conda
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
RUN apt update -y && apt -y install wget && wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
/bin/bash ~/miniconda.sh -b -p /opt/conda
# Put conda in path so we can use conda activate
ENV PATH=$CONDA_DIR/bin:$PATH
RUN conda install -c conda-forge cudatoolkit=11.2.2 cudnn=8.1.0
RUN conda -y install -c conda-forge cudatoolkit cudnn
ENV CONDA_PREFIX=/opt/conda
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib/

View File

@@ -10,7 +10,7 @@ function readyn() {
}
cd /tmp
SCRYPTED_VERSION=v0.93.0
SCRYPTED_VERSION=v0.96.0
SCRYPTED_TAR_ZST=scrypted-$SCRYPTED_VERSION.tar.zst
if [ -z "$VMID" ]
then

View File

@@ -1,12 +1,12 @@
{
"name": "scrypted",
"version": "1.3.13",
"version": "1.3.14",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "scrypted",
"version": "1.3.13",
"version": "1.3.14",
"license": "ISC",
"dependencies": {
"@scrypted/client": "^1.3.3",

View File

@@ -1,6 +1,6 @@
{
"name": "scrypted",
"version": "1.3.13",
"version": "1.3.14",
"description": "",
"main": "./dist/packages/cli/src/main.js",
"bin": {

View File

@@ -133,11 +133,7 @@ export async function serveMain(installVersion?: string) {
await startServer(installDir);
if (fs.existsSync(EXIT_FILE)) {
console.log('Exiting.');
process.exit(1);
}
else if (fs.existsSync(UPDATE_FILE)) {
if (fs.existsSync(UPDATE_FILE)) {
console.log('Update requested. Installing.');
await runCommandEatError('npm', '--prefix', installDir, 'install', '--production', '@scrypted/server@latest').catch(e => {
console.error('Update failed', e);
@@ -145,6 +141,10 @@ export async function serveMain(installVersion?: string) {
console.log('Exiting.');
process.exit(1);
}
else if (fs.existsSync(EXIT_FILE)) {
console.log('Exiting.');
process.exit(1);
}
else {
console.log(`Service unexpectedly exited. Restarting momentarily.`);
await sleep(10000);
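
In short, the hunk above swaps the order of the shutdown checks so that a pending update takes precedence over a pending exit. A schematic of the resulting flow (names taken from the hunk, bodies elided):

// Schematic only: UPDATE_FILE is now checked before EXIT_FILE, so an update
// requested during shutdown is installed before the process exits.
if (fs.existsSync(UPDATE_FILE)) {
    // npm install @scrypted/server@latest, then process.exit(1)
}
else if (fs.existsSync(EXIT_FILE)) {
    // process.exit(1)
}
else {
    // unexpected exit: sleep 10 seconds, then restart the loop
}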

View File

@@ -1,4 +1,4 @@
{
"scrypted.debugHost": "scrypted-server",
"scrypted.debugHost": "127.0.0.1",
}

View File

@@ -1,6 +1,11 @@
<details>
<summary>Changelog</summary>
### 0.3.1
alexa/google-home: fix potential vulnerability. do not allow local network control using cloud tokens belonging to a different user. the plugins are now locked to a specific scrypted cloud account once paired.
### 0.3.0
alexa/google-home: additional auth token checks to harden endpoints for cloud sharing

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/alexa",
"version": "0.3.1",
"version": "0.3.2",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/alexa",
"version": "0.3.1",
"version": "0.3.2",
"dependencies": {
"axios": "^1.3.4",
"uuid": "^9.0.0"

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/alexa",
"version": "0.3.1",
"version": "0.3.2",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",
"prescrypted-setup-project": "scrypted-package-json",

View File

@@ -27,6 +27,7 @@ class AlexaPlugin extends ScryptedDeviceBase implements HttpRequestHandler, Mixi
json: true
},
syncedDevices: {
defaultValue: [],
multiple: true,
hide: true
},
@@ -66,7 +67,10 @@ class AlexaPlugin extends ScryptedDeviceBase implements HttpRequestHandler, Mixi
alexaHandlers.set('Alexa.Authorization/AcceptGrant', this.onAlexaAuthorization);
alexaHandlers.set('Alexa.Discovery/Discover', this.onDiscoverEndpoints);
this.start();
this.start()
.catch(e => {
this.console.error('startup failed', e);
})
}
async start() {

View File

@@ -10,7 +10,7 @@
"port": 10081,
"request": "attach",
"skipFiles": [
"**/plugin-remote-worker.*",
"**/plugin-console.*",
"<node_internals>/**"
],
"preLaunchTask": "scrypted: deploy+debug",

View File

@@ -1,19 +1,21 @@
{
"name": "@scrypted/amcrest",
"version": "0.0.135",
"version": "0.0.150",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/amcrest",
"version": "0.0.135",
"version": "0.0.150",
"license": "Apache",
"dependencies": {
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk"
"@scrypted/sdk": "file:../../sdk",
"content-type": "^1.0.5"
},
"devDependencies": {
"@types/node": "^20.10.8"
"@types/content-type": "^1.1.8",
"@types/node": "^20.11.30"
}
},
"../../common": {
@@ -23,23 +25,22 @@
"dependencies": {
"@scrypted/sdk": "file:../sdk",
"@scrypted/server": "file:../server",
"http-auth-utils": "^3.0.2",
"node-fetch-commonjs": "^3.1.1",
"http-auth-utils": "^5.0.1",
"typescript": "^5.3.3"
},
"devDependencies": {
"@types/node": "^20.10.8",
"@types/node": "^20.11.0",
"ts-node": "^10.9.2"
}
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.3.4",
"version": "0.3.29",
"license": "ISC",
"dependencies": {
"@babel/preset-typescript": "^7.18.6",
"adm-zip": "^0.4.13",
"axios": "^0.21.4",
"axios": "^1.6.5",
"babel-loader": "^9.1.0",
"babel-plugin-const-enum": "^1.1.0",
"esbuild": "^0.15.9",
@@ -77,15 +78,29 @@
"resolved": "../../sdk",
"link": true
},
"node_modules/@types/content-type": {
"version": "1.1.8",
"resolved": "https://registry.npmjs.org/@types/content-type/-/content-type-1.1.8.tgz",
"integrity": "sha512-1tBhmVUeso3+ahfyaKluXe38p+94lovUZdoVfQ3OnJo9uJC42JT7CBoN3k9HYhAae+GwiBYmHu+N9FZhOG+2Pg==",
"dev": true
},
"node_modules/@types/node": {
"version": "20.10.8",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.10.8.tgz",
"integrity": "sha512-f8nQs3cLxbAFc00vEU59yf9UyGUftkPaLGfvbVOIDdx2i1b8epBqj2aNGyP19fiyXWvlmZ7qC1XLjAzw/OKIeA==",
"version": "20.11.30",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
"integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
"dev": true,
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/amcrest",
"version": "0.0.135",
"version": "0.0.150",
"description": "Amcrest Plugin for Scrypted",
"author": "Scrypted",
"license": "Apache",
@@ -36,9 +36,11 @@
},
"dependencies": {
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk"
"@scrypted/sdk": "file:../../sdk",
"content-type": "^1.0.5"
},
"devDependencies": {
"@types/node": "^20.10.8"
"@types/content-type": "^1.1.8",
"@types/node": "^20.11.30"
}
}

View File

@@ -1,10 +1,140 @@
import { AuthFetchCredentialState, HttpFetchOptions, authHttpFetch } from '@scrypted/common/src/http-auth-fetch';
import { Readable } from 'stream';
import { readLine } from '@scrypted/common/src/read-stream';
import { parseHeaders, readBody } from '@scrypted/common/src/rtsp-server';
import contentType from 'content-type';
import { IncomingMessage } from 'http';
import { EventEmitter, Readable } from 'stream';
import { Destroyable } from '../../rtsp/src/rtsp';
import { getDeviceInfo } from './probe';
import { Point } from '@scrypted/sdk';
// Human
// {
// "Action" : "Cross",
// "Class" : "Normal",
// "CountInGroup" : 1,
// "DetectRegion" : [
// [ 455, 260 ],
// [ 3586, 260 ],
// [ 3768, 7580 ],
// [ 382, 7451 ]
// ],
// "Direction" : "Enter",
// "EventID" : 10181,
// "GroupID" : 0,
// "Name" : "Rule1",
// "Object" : {
// "Action" : "Appear",
// "BoundingBox" : [ 2856, 1280, 3880, 4880 ],
// "Center" : [ 3368, 3080 ],
// "Confidence" : 0,
// "LowerBodyColor" : [ 0, 0, 0, 0 ],
// "MainColor" : [ 0, 0, 0, 0 ],
// "ObjectID" : 863,
// "ObjectType" : "Human",
// "RelativeID" : 0,
// "Speed" : 0
// },
// "PTS" : 43380319830.0,
// "RuleID" : 2,
// "Track" : [],
// "UTC" : 1711446999,
// "UTCMS" : 701
// }
// Face
// {
// "CfgRuleId" : 1,
// "Class" : "FaceDetection",
// "CountInGroup" : 2,
// "DetectRegion" : null,
// "EventID" : 10360,
// "EventSeq" : 6,
// "Faces" : [
// {
// "BoundingBox" : [ 1504, 2336, 1728, 2704 ],
// "Center" : [ 1616, 2520 ],
// "ObjectID" : 94,
// "ObjectType" : "HumanFace",
// "RelativeID" : 0
// }
// ],
// "FrameSequence" : 8251212,
// "GroupID" : 6,
// "Mark" : 0,
// "Name" : "FaceDetection",
// "Object" : {
// "Action" : "Appear",
// "BoundingBox" : [ 1504, 2336, 1728, 2704 ],
// "Center" : [ 1616, 2520 ],
// "Confidence" : 19,
// "FrameSequence" : 8251212,
// "ObjectID" : 94,
// "ObjectType" : "HumanFace",
// "RelativeID" : 0,
// "SerialUUID" : "",
// "Source" : 0.0,
// "Speed" : 0,
// "SpeedTypeInternal" : 0
// },
// "Objects" : [
// {
// "Action" : "Appear",
// "BoundingBox" : [ 1504, 2336, 1728, 2704 ],
// "Center" : [ 1616, 2520 ],
// "Confidence" : 19,
// "FrameSequence" : 8251212,
// "ObjectID" : 94,
// "ObjectType" : "HumanFace",
// "RelativeID" : 0,
// "SerialUUID" : "",
// "Source" : 0.0,
// "Speed" : 0,
// "SpeedTypeInternal" : 0
// }
// ],
// "PTS" : 43774941350.0,
// "Priority" : 0,
// "RuleID" : 1,
// "RuleId" : 1,
// "Source" : -1280470024.0,
// "UTC" : 947510337,
// "UTCMS" : 0
// }
export interface AmcrestObjectDetails {
Action: string;
BoundingBox: Point;
Center: Point;
Confidence: number;
LowerBodyColor: [number, number, number, number];
MainColor: [number, number, number, number];
ObjectID: number;
ObjectType: string;
RelativeID: number;
Speed: number;
}
export interface AmcrestEventData {
Action: string;
Class: string;
CountInGroup: number;
DetectRegion: Point[];
Direction: string;
EventID: number;
GroupID: number;
Name: string;
Object: AmcrestObjectDetails;
PTS: number;
RuleID: number;
Track: any[];
UTC: number;
UTCMS: number;
}
export enum AmcrestEvent {
MotionStart = "Code=VideoMotion;action=Start",
MotionStop = "Code=VideoMotion;action=Stop",
MotionInfo = "Code=VideoMotionInfo;action=State",
AudioStart = "Code=AudioMutation;action=Start",
AudioStop = "Code=AudioMutation;action=Stop",
TalkInvite = "Code=_DoTalkAction_;action=Invite",
@@ -18,8 +148,33 @@ export enum AmcrestEvent {
DahuaTalkHangup = "Code=PassiveHungup;action=Start",
DahuaCallDeny = "Code=HungupPhone;action=Pulse",
DahuaTalkPulse = "Code=_CallNoAnswer_;action=Pulse",
FaceDetection = "Code=FaceDetection;action=Start",
SmartMotionHuman = "Code=SmartMotionHuman;action=Start",
SmartMotionVehicle = "Code=Vehicle;action=Start",
CrossLineDetection = "Code=CrossLineDetection;action=Start",
CrossRegionDetection = "Code=CrossRegionDetection;action=Start",
}
async function readAmcrestMessage(client: Readable): Promise<string[]> {
let currentHeaders: string[] = [];
while (true) {
const originalLine = await readLine(client);
const line = originalLine.trim();
if (!line)
return currentHeaders;
// dahua bugs out and sends message without a newline separating the body:
// Content-Length:39
// Code=AudioMutation;action=Start;index=0
if (!line.includes(':')) {
client.unshift(Buffer.from(originalLine + '\n'));
return currentHeaders;
}
currentHeaders.push(line);
}
}
export class AmcrestCameraClient {
credential: AuthFetchCredentialState;
@@ -78,7 +233,8 @@ export class AmcrestCameraClient {
return response.body;
}
async listenEvents() {
async listenEvents(): Promise<Destroyable> {
const events = new EventEmitter();
const url = `http://${this.ip}/cgi-bin/eventManager.cgi?action=attach&codes=[All]`;
console.log('preparing event listener', url);
@@ -86,32 +242,117 @@ export class AmcrestCameraClient {
url,
responseType: 'readable',
});
const stream = response.body;
const stream: IncomingMessage = response.body;
(events as any).destroy = () => {
stream.destroy();
events.removeAllListeners();
};
stream.on('close', () => {
events.emit('close');
});
stream.on('end', () => {
events.emit('end');
});
stream.on('error', e => {
events.emit('error', e);
});
stream.socket.setKeepAlive(true);
stream.on('data', (buffer: Buffer) => {
const data = buffer.toString();
const parts = data.split(';');
let index: string;
try {
for (const part of parts) {
if (part.startsWith('index')) {
index = part.split('=')[1]?.trim();
const ct = stream.headers['content-type'];
// make content type parsable as content disposition filename
const cd = contentType.parse(ct);
let { boundary } = cd.parameters;
boundary = `--${boundary}`;
const boundaryEnd = `${boundary}--`;
(async () => {
while (true) {
let ignore = await readLine(stream);
ignore = ignore.trim();
if (!ignore)
continue;
if (ignore === boundaryEnd)
continue;
// dahua bugs out and sends this.
if (ignore === 'HTTP/1.1 200 OK') {
const message = await readAmcrestMessage(stream);
this.console.log('ignoring dahua http message', message);
message.unshift('');
const headers = parseHeaders(message);
const body = await readBody(stream, headers);
if (body)
this.console.log('ignoring dahua http body', body);
continue;
}
if (ignore !== boundary) {
this.console.error('expected boundary but found', ignore);
this.console.error(response.headers);
throw new Error('expected boundary');
}
const message = await readAmcrestMessage(stream);
events.emit('data', message);
message.unshift('');
const headers = parseHeaders(message);
const body = await readBody(stream, headers);
const data = body.toString();
events.emit('data', data);
const parts = data.split(';');
let index: string;
try {
for (const part of parts) {
if (part.startsWith('index')) {
index = part.split('=')[1]?.trim();
}
}
}
catch (e) {
this.console.error('error parsing index', data);
}
let jsonData: any;
try {
for (const part of parts) {
if (part.startsWith('data')) {
jsonData = JSON.parse(part.split('=')[1]?.trim());
}
}
}
catch (e) {
this.console.error('error parsing data', data);
}
for (const event of Object.values(AmcrestEvent)) {
if (data.indexOf(event) !== -1) {
events.emit('event', event, index, data);
if (event === AmcrestEvent.SmartMotionHuman) {
events.emit('smart', 'person', jsonData);
}
else if (event === AmcrestEvent.SmartMotionVehicle) {
events.emit('smart', 'car', jsonData);
}
else if (event === AmcrestEvent.FaceDetection) {
events.emit('smart', 'face', jsonData);
}
else if (event === AmcrestEvent.CrossLineDetection || event === AmcrestEvent.CrossRegionDetection) {
const eventData: AmcrestEventData = jsonData;
if (eventData?.Object?.ObjectType === 'Human') {
events.emit('smart', 'person', eventData);
}
else if (eventData?.Object?.ObjectType === 'Vehicle') {
events.emit('smart', 'car', eventData);
}
}
}
}
}
catch (e) {
this.console.error('error parsing index', data);
}
// this.console?.log('event', data);
for (const event of Object.values(AmcrestEvent)) {
if (data.indexOf(event) !== -1) {
stream.emit('event', event, index, data);
}
}
});
return stream;
})()
.catch(() => stream.destroy());
return events as any as Destroyable;
}
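// For context, a hypothetical consumer of the reworked listenEvents(), which
// now returns a Destroyable EventEmitter rather than the raw HTTP stream.
// The 'smart' and 'event' names follow the emits above; the client instance
// is assumed to be constructed elsewhere.
//
//   const events = await client.listenEvents();
//   events.on('smart', (className: string, data: AmcrestEventData) => {
//       console.log('smart detection:', className, data?.Object?.ObjectType);
//   });
//   events.on('event', (event: AmcrestEvent, index: string, payload: string) => {
//       console.log('raw event:', event, index);
//   });
//   events.on('error', e => console.error('event stream error', e));
//   // tear down the multipart connection when finished:
//   events.destroy();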
async enableContinousRecording(channel: number) {

View File

@@ -1,11 +1,11 @@
import { ffmpegLogInitialOutput } from '@scrypted/common/src/media-helpers';
import { readLength } from "@scrypted/common/src/read-stream";
import sdk, { Camera, DeviceCreatorSettings, DeviceInformation, FFmpegInput, Intercom, Lock, MediaObject, MediaStreamOptions, Reboot, RequestPictureOptions, RequestRecordingStreamOptions, ResponseMediaStreamOptions, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting, VideoCameraConfiguration, VideoRecorder } from "@scrypted/sdk";
import sdk, { Camera, DeviceCreatorSettings, DeviceInformation, FFmpegInput, Intercom, Lock, MediaObject, MediaStreamOptions, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, Reboot, RequestPictureOptions, RequestRecordingStreamOptions, ResponseMediaStreamOptions, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting, VideoCameraConfiguration, VideoRecorder } from "@scrypted/sdk";
import child_process, { ChildProcess } from 'child_process';
import { PassThrough, Readable, Stream } from "stream";
import { OnvifIntercom } from "../../onvif/src/onvif-intercom";
import { RtspProvider, RtspSmartCamera, UrlMediaStreamOptions } from "../../rtsp/src/rtsp";
import { AmcrestCameraClient, AmcrestEvent } from "./amcrest-api";
import { AmcrestCameraClient, AmcrestEvent, AmcrestEventData } from "./amcrest-api";
const { mediaManager } = sdk;
@@ -22,12 +22,13 @@ function findValue(blob: string, prefix: string, key: string) {
return parts[1];
}
class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration, Camera, Intercom, Lock, VideoRecorder, Reboot {
class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration, Camera, Intercom, Lock, VideoRecorder, Reboot, ObjectDetector {
eventStream: Stream;
cp: ChildProcess;
client: AmcrestCameraClient;
videoStreamOptions: Promise<UrlMediaStreamOptions[]>;
onvifIntercom = new OnvifIntercom(this);
hasSmartDetection: boolean;
constructor(nativeId: string, provider: RtspProvider) {
super(nativeId, provider);
@@ -36,6 +37,7 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
this.storage.removeItem('amcrestDoorbell');
}
this.hasSmartDetection = this.storage.getItem('hasSmartDetection') === 'true';
this.updateDevice();
this.updateDeviceInfo();
}
@@ -184,10 +186,19 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
if (idx.toString() !== channelNumber)
return;
}
if (event === AmcrestEvent.MotionStart) {
if (event === AmcrestEvent.MotionStart
|| event === AmcrestEvent.SmartMotionHuman
|| event === AmcrestEvent.SmartMotionVehicle
|| event === AmcrestEvent.CrossLineDetection
|| event === AmcrestEvent.CrossRegionDetection) {
this.motionDetected = true;
resetMotionTimeout();
}
else if (event === AmcrestEvent.MotionInfo) {
// this seems to be a motion pulse
if (this.motionDetected)
resetMotionTimeout();
}
else if (event === AmcrestEvent.MotionStop) {
// use resetMotionTimeout
}
@@ -231,9 +242,43 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
}
});
events.on('smart', (className: string, data: AmcrestEventData) => {
if (!this.hasSmartDetection) {
this.hasSmartDetection = true;
this.storage.setItem('hasSmartDetection', 'true');
this.updateDevice();
}
const detected: ObjectsDetected = {
timestamp: Date.now(),
detections: [
{
score: 1,
className,
}
],
};
this.onDeviceEvent(ScryptedInterface.ObjectDetector, detected);
});
return events;
}
async getDetectionInput(detectionId: string, eventId?: any): Promise<MediaObject> {
return;
}
async getObjectTypes(): Promise<ObjectDetectionTypes> {
return {
classes: [
'person',
'face',
'car',
],
}
}
async getOtherSettings(): Promise<Setting[]> {
const ret = await super.getOtherSettings();
ret.push(
@@ -472,13 +517,19 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
if (isDoorbell || twoWayAudio) {
interfaces.push(ScryptedInterface.Intercom);
}
const enableDahuaLock = this.storage.getItem('enableDahuaLock') === 'true';
if (isDoorbell && doorbellType === DAHUA_DOORBELL_TYPE && enableDahuaLock) {
interfaces.push(ScryptedInterface.Lock);
}
const continuousRecording = this.storage.getItem('continuousRecording') === 'true';
if (continuousRecording)
interfaces.push(ScryptedInterface.VideoRecorder);
if (this.hasSmartDetection)
interfaces.push(ScryptedInterface.ObjectDetector);
this.provider.updateDevice(this.nativeId, this.name, interfaces, type);
}
@@ -521,7 +572,7 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
}
const doorbellType = this.storage.getItem('doorbellType');
// not sure if this all works, since i don't actually have a doorbell.
// good luck!
const channel = this.getRtspChannel() || '1';
@@ -548,12 +599,22 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
}
else {
args.push(
"-vn",
'-acodec', 'aac',
'-f', 'adts',
'pipe:3',
);
"-vn",
'-acodec', 'aac',
'-f', 'adts',
'pipe:3',
);
contentType = 'Audio/AAC';
// args.push(
// "-vn",
// '-acodec', 'pcm_mulaw',
// '-ac', '1',
// '-ar', '8000',
// '-sample_fmt', 's16',
// '-f', 'mulaw',
// 'pipe:3',
// );
// contentType = 'Audio/G.711A';
}
this.console.log('ffmpeg intercom', args);
@@ -573,15 +634,19 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
// seems the dahua doorbells preferred 1024 chunks. should investigate adts
// parsing and sending multipart chunks instead.
const passthrough = new PassThrough();
const abortController = new AbortController();
this.getClient().request({
url,
method: 'POST',
headers: {
'Content-Type': contentType,
'Content-Length': '9999999'
'Content-Length': '9999999',
},
signal: abortController.signal,
responseType: 'readable',
}, passthrough);
}, passthrough)
.catch(() => { })
.finally(() => this.console.log('request finished'))
try {
while (true) {
@@ -593,7 +658,8 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
}
finally {
this.console.log('audio finished');
passthrough.end();
passthrough.destroy();
abortController.abort();
}
this.stopIntercom();

View File

@@ -29,9 +29,14 @@ export async function getDeviceInfo(credential: AuthFetchCredentialState, addres
vals[k] = v.trim();
}
return {
const ret = {
deviceType: vals.deviceType,
hardwareVersion: vals.hardwareVersion,
serialNumber: vals.serialNumber,
}
};
if (!ret.deviceType && !ret.hardwareVersion && !ret.serialNumber)
throw new Error('not amcrest');
return ret;
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/bticino",
"version": "0.0.13",
"version": "0.0.15",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/bticino",
"version": "0.0.13",
"version": "0.0.15",
"dependencies": {
"@slyoldfox/sip": "^0.0.6-1",
"sdp": "^3.0.3",
@@ -30,23 +30,23 @@
"dependencies": {
"@scrypted/sdk": "file:../sdk",
"@scrypted/server": "file:../server",
"http-auth-utils": "^3.0.2",
"node-fetch-commonjs": "^3.1.1",
"typescript": "^4.4.3"
"http-auth-utils": "^5.0.1",
"typescript": "^5.3.3"
},
"devDependencies": {
"@types/node": "^16.9.0"
"@types/node": "^20.11.0",
"ts-node": "^10.9.2"
}
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.3.2",
"version": "0.3.14",
"dev": true,
"license": "ISC",
"dependencies": {
"@babel/preset-typescript": "^7.18.6",
"adm-zip": "^0.4.13",
"axios": "^0.21.4",
"axios": "^1.6.5",
"babel-loader": "^9.1.0",
"babel-plugin-const-enum": "^1.1.0",
"esbuild": "^0.15.9",
@@ -1219,10 +1219,10 @@
"requires": {
"@scrypted/sdk": "file:../sdk",
"@scrypted/server": "file:../server",
"@types/node": "^16.9.0",
"http-auth-utils": "^3.0.2",
"node-fetch-commonjs": "^3.1.1",
"typescript": "^4.4.3"
"@types/node": "^20.11.0",
"http-auth-utils": "^5.0.1",
"ts-node": "^10.9.2",
"typescript": "^5.3.3"
}
},
"@scrypted/sdk": {
@@ -1232,7 +1232,7 @@
"@types/node": "^18.11.18",
"@types/stringify-object": "^4.0.0",
"adm-zip": "^0.4.13",
"axios": "^0.21.4",
"axios": "^1.6.5",
"babel-loader": "^9.1.0",
"babel-plugin-const-enum": "^1.1.0",
"esbuild": "^0.15.9",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/bticino",
"version": "0.0.13",
"version": "0.0.15",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",
"prescrypted-setup-project": "scrypted-package-json",

View File

@@ -17,6 +17,12 @@ import { ffmpegLogInitialOutput, safeKillFFmpeg, safePrintFFmpegArguments } from
import { PersistentSipManager } from './persistent-sip-manager';
import { InviteHandler } from './bticino-inviteHandler';
import { SipOptions, SipRequest } from '../../sip/src/sip-manager';
import fs from "fs"
import url from "url"
import path from 'path';
import { default as stream } from 'node:stream'
import type { ReadableStream } from 'node:stream/web'
import { finished } from "stream/promises";
import { get } from 'http'
import { ControllerApi } from './c300x-controller-api';
@@ -25,6 +31,7 @@ import { BticinoMuteSwitch } from './bticino-mute-switch';
const STREAM_TIMEOUT = 65000;
const { mediaManager } = sdk;
const BTICINO_CLIPS = path.join(process.env.SCRYPTED_PLUGIN_VOLUME, 'bticino-clips');
export class BticinoSipCamera extends ScryptedDeviceBase implements MotionSensor, DeviceProvider, Intercom, Camera, VideoCamera, Settings, BinarySensor, HttpRequestHandler, VideoClips, Reboot {
@@ -147,11 +154,87 @@ export class BticinoSipCamera extends ScryptedDeviceBase implements MotionSensor
});
}
getVideoClip(videoId: string): Promise<MediaObject> {
let c300x = SipHelper.getIntercomIp(this)
const url = `http://${c300x}:8080/voicemail?msg=${videoId}/aswm.avi&raw=true`;
return mediaManager.createMediaObjectFromUrl(url);
async getVideoClip(videoId: string): Promise<MediaObject> {
const outputfile = await this.fetchAndConvertVoicemailMessage(videoId);
const fileURLToPath: string = url.pathToFileURL(outputfile).toString()
this.console.log(`Creating mediaObject for url: ${fileURLToPath}`)
return await mediaManager.createMediaObjectFromUrl(fileURLToPath);
}
private async fetchAndConvertVoicemailMessage(videoId: string) {
let c300x = SipHelper.getIntercomIp(this)
const response = await fetch(`http://${c300x}:8080/voicemail?msg=${videoId}/aswm.avi&raw=true`);
const contentLength: number = Number(response.headers.get("Content-Length"));
const lastModified: Date = new Date(response.headers.get("Last-Modified-Time"));
const avifile = `${BTICINO_CLIPS}/${videoId}.avi`;
const outputfile = `${BTICINO_CLIPS}/${videoId}.mp4`;
if (!fs.existsSync(BTICINO_CLIPS)) {
this.console.log(`Creating clips dir at: ${BTICINO_CLIPS}`)
fs.mkdirSync(BTICINO_CLIPS);
}
if (fs.existsSync(avifile)) {
const stat = fs.statSync(avifile);
if (stat.size != contentLength || stat.mtime.getTime() != lastModified.getTime()) {
this.console.log(`Size ${stat.size} != ${contentLength} or time ${stat.mtime.getTime()} != ${lastModified.getTime()}`)
try {
fs.rmSync(avifile);
} catch (e) { }
try {
fs.rmSync(outputfile);
} catch (e) { }
} else {
this.console.log(`Keeping the cached video at ${avifile}`)
}
}
if (!fs.existsSync(avifile)) {
this.console.log("Starting download.")
await finished(stream.Readable.from(response.body as ReadableStream<Uint8Array>).pipe(fs.createWriteStream(avifile)));
this.console.log("Download finished.")
try {
this.console.log(`Setting mtime to ${lastModified}`)
fs.utimesSync(avifile, lastModified, lastModified);
} catch (e) { }
}
const ffmpegPath = await mediaManager.getFFmpegPath();
const ffmpegArgs = [
'-hide_banner',
'-nostats',
'-y',
'-i', avifile,
outputfile
];
safePrintFFmpegArguments(console, ffmpegArgs);
const cp = child_process.spawn(ffmpegPath, ffmpegArgs, {
stdio: ['pipe', 'pipe', 'pipe', 'pipe'],
});
const p = new Promise((resolveFunc) => {
cp.stdout.on("data", (x) => {
this.console.log(x.toString());
});
cp.stderr.on("data", (x) => {
this.console.error(x.toString());
});
cp.on("exit", (code) => {
resolveFunc(code);
});
});
let returnCode = await p;
this.console.log(`Converted file returned code: ${returnCode}`);
return outputfile;
}
getVideoClipThumbnail(thumbnailId: string): Promise<MediaObject> {
let c300x = SipHelper.getIntercomIp(this)
const url = `http://${c300x}:8080/voicemail?msg=${thumbnailId}/aswm.jpg&raw=true`;

View File

@@ -33,8 +33,10 @@ export class VoicemailHandler extends SipRequestHandler {
handle(request: SipRequest) {
const lastVoicemailMessageTimestamp : number = Number.parseInt( this.sipCamera.storage.getItem('lastVoicemailMessageTimestamp') ) || -1
const message : string = request.content.toString()
if( message.startsWith('*#8**40*0*0*') || message.startsWith('*#8**40*1*0*') ) {
this.aswmIsEnabled = message.startsWith('*#8**40*1*0*');
let matches : Array<RegExpMatchArray> = [...message.matchAll(/\*#8\*\*40\*([01])\*([01])\*/gm)]
if( matches && matches.length > 0 && matches[0].length > 0 ) {
this.sipCamera.console.debug( "Answering machine state: " + matches[0][1] + " / Welcome message state: " + matches[0][2] );
this.aswmIsEnabled = matches[0][1] == '1';
if( this.isEnabled() ) {
this.sipCamera.console.debug("Handling incoming answering machine reply")
const messages : string[] = message.split(';')
@@ -60,6 +62,8 @@ export class VoicemailHandler extends SipRequestHandler {
this.sipCamera.console.debug("No new messages since: " + lastVoicemailMessageTimestamp + " lastMessage: " + lastMessageTimestamp)
}
}
} else {
this.sipCamera.console.debug("Not handling message: " + message)
}
}
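
To make the new parsing concrete, a small illustration of the VSC pattern introduced above (the sample message is hypothetical):

const pattern = /\*#8\*\*40\*([01])\*([01])\*/gm;
const matches = [...'*#8**40*1*0*'.matchAll(pattern)];
// matches[0][1] === '1' -> answering machine enabled
// matches[0][2] === '0' -> welcome message disabled
console.log(matches[0][1], matches[0][2]);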

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/chromecast",
"version": "0.1.57",
"version": "0.1.58",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/chromecast",
"version": "0.1.57",
"version": "0.1.58",
"license": "Apache-2.0",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/chromecast",
"version": "0.1.57",
"version": "0.1.58",
"description": "Send video, audio, and text to speech notifications to Chromecast and Google Home devices",
"author": "Scrypted",
"license": "Apache-2.0",

View File

@@ -183,7 +183,7 @@ class CastDevice extends ScryptedDeviceBase implements MediaPlayer, Refresh, Eng
media = await mediaManager.createMediaObjectFromUrl(media);
}
}
else if (options?.mimeType?.startsWith('image/')) {
else if (options?.mimeType?.startsWith('image/') || options?.mimeType?.startsWith('audio/')) {
url = await mediaManager.convertMediaObjectToInsecureLocalUrl(media, options?.mimeType);
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/core",
"version": "0.3.18",
"version": "0.3.23",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/core",
"version": "0.3.18",
"version": "0.3.23",
"license": "Apache-2.0",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/core",
"version": "0.3.18",
"version": "0.3.23",
"description": "Scrypted Core plugin. Provides the UI, websocket, and engine.io APIs.",
"author": "Scrypted",
"license": "Apache-2.0",

View File

@@ -18,7 +18,7 @@ const { systemManager, deviceManager, endpointManager } = sdk;
export function getAddresses() {
const addresses: string[] = [];
for (const [iface, nif] of Object.entries(os.networkInterfaces())) {
if (iface.startsWith('en') || iface.startsWith('eth') || iface.startsWith('wlan')) {
if (iface.startsWith('en') || iface.startsWith('eth') || iface.startsWith('wlan') || iface.startsWith('net')) {
addresses.push(iface);
addresses.push(...nif.map(addr => addr.address));
}

View File

@@ -11,7 +11,7 @@ export async function checkLxcDependencies() {
let needRestart = false;
if (!process.version.startsWith('v20.')) {
const cp = child_process.spawn('sh', ['-c', 'curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && apt install -y nodejs']);
const cp = child_process.spawn('sh', ['-c', 'apt update -y && curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && apt install -y nodejs']);
const [exitCode] = await once(cp, 'exit');
if (exitCode !== 0)
sdk.log.a('Failed to install Node.js 20.x.');

View File

@@ -1,9 +1,9 @@
{
"compilerOptions": {
"module": "commonjs",
"target": "ES2021",
"resolveJsonModule": true,
"module": "Node16",
"moduleResolution": "Node16",
"target": "esnext",
"resolveJsonModule": true,
"esModuleInterop": true,
"sourceMap": true
},

View File

@@ -57,7 +57,10 @@ export default {
t += `<tspan x='${x}' dy='${toffset}em'>${Math.round(detection.score * 100) / 100}</tspan>`
toffset -= 1.2;
}
const tname = detection.className + (detection.id ? `: ${detection.id}` : '')
const tname = detection.className
+ (detection.id || detection.label ? ':' : '')
+ (detection.id ? ` ${detection.id}` : '')
+ (detection.label ? ` ${detection.label}` : '')
t += `<tspan x='${x}' dy='${toffset}em'>${tname}</tspan>`
const fs = 30 * svgScale;
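// Example of how the overlay title composes with the new label field
// (illustrative values):
//   { className: 'face', id: '2', label: 'Alice' } -> 'face: 2 Alice'
//   { className: 'person' }                        -> 'person'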

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/coreml",
"version": "0.1.29",
"version": "0.1.45",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/coreml",
"version": "0.1.29",
"version": "0.1.45",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -34,6 +34,7 @@
"type": "API",
"interfaces": [
"Settings",
"DeviceProvider",
"ObjectDetection",
"ObjectDetectionPreview"
]
@@ -41,5 +42,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.29"
"version": "0.1.45"
}

plugins/coreml/src/common Symbolic link
View File

@@ -0,0 +1 @@
../../openvino/src/common

View File

@@ -1,24 +1,42 @@
from __future__ import annotations
import ast
import asyncio
import concurrent.futures
import os
import re
from typing import Any, Tuple
from typing import Any, List, Tuple
import coremltools as ct
import scrypted_sdk
from PIL import Image
from scrypted_sdk import Setting, SettingValue
import yolo
from predict import Prediction, PredictPlugin, Rectangle
from common import yolo
from coreml.recognition import CoreMLRecognition
from predict import Prediction, PredictPlugin
from predict.rectangle import Rectangle
predictExecutor = concurrent.futures.ThreadPoolExecutor(8, "CoreML-Predict")
predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "CoreML-Predict")
availableModels = [
"Default",
"scrypted_yolov9c_320",
"scrypted_yolov9c",
"scrypted_yolov6n_320",
"scrypted_yolov6n",
"scrypted_yolov6s_320",
"scrypted_yolov6s",
"scrypted_yolov8n_320",
"scrypted_yolov8n",
"ssdlite_mobilenet_v2",
"yolov4-tiny",
]
def parse_label_contents(contents: str):
lines = contents.splitlines()
lines = contents.split(",")
lines = [line for line in lines if line.strip()]
ret = {}
for row_number, content in enumerate(lines):
pair = re.split(r"[:\s]+", content.strip(), maxsplit=1)
@@ -29,40 +47,49 @@ def parse_label_contents(contents: str):
return ret
class CoreMLPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings):
def parse_labels(userDefined):
yolo = userDefined.get("names") or userDefined.get("yolo.names")
if yolo:
j = ast.literal_eval(yolo)
ret = {}
for k, v in j.items():
ret[int(k)] = v
return ret
classes = userDefined.get("classes")
if not classes:
raise Exception("no classes found in model metadata")
return parse_label_contents(classes)
class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProvider):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
model = self.storage.getItem("model") or "Default"
if model == "Default":
model = "yolov8n_320"
if model == "Default" or model not in availableModels:
if model != "Default":
self.storage.setItem("model", "Default")
model = "scrypted_yolov9c_320"
self.yolo = "yolo" in model
self.yolov8 = "yolov8" in model
self.yolov9 = "yolov9" in model
model_version = "v2"
self.scrypted_yolo = "scrypted_yolo" in model
self.scrypted_model = "scrypted" in model
model_version = "v7"
mlmodel = "model" if self.scrypted_yolo else model
print(f"model: {model}")
if not self.yolo:
# todo convert these to mlpackage
labelsFile = self.downloadFile(
f"https://github.com/koush/coreml-models/raw/main/{model}/coco_labels.txt",
"coco_labels.txt",
)
modelFile = self.downloadFile(
f"https://github.com/koush/coreml-models/raw/main/{model}/{model}.mlmodel",
f"https://github.com/koush/coreml-models/raw/main/{model}/{mlmodel}.mlmodel",
f"{model}.mlmodel",
)
else:
if self.yolov8:
modelFile = self.downloadFile(
f"https://github.com/koush/coreml-models/raw/main/{model}/{model}.mlmodel",
f"{model}.mlmodel",
)
elif self.yolov9:
if self.scrypted_yolo:
files = [
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/weights/weight.bin",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/{model}.mlmodel",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/{mlmodel}.mlmodel",
f"{model}/{model}.mlpackage/Manifest.json",
]
@@ -77,7 +104,7 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/Metadata.json",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/weights/weight.bin",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/{model}.mlmodel",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/{mlmodel}.mlmodel",
f"{model}/{model}.mlpackage/Manifest.json",
]
@@ -88,25 +115,42 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
)
modelFile = os.path.dirname(p)
labelsFile = self.downloadFile(
f"https://github.com/koush/coreml-models/raw/main/{model}/coco_80cl.txt",
f"{model_version}/{model}/coco_80cl.txt",
)
self.model = ct.models.MLModel(modelFile)
self.modelspec = self.model.get_spec()
self.inputdesc = self.modelspec.description.input[0]
self.inputheight = self.inputdesc.type.imageType.height
self.inputwidth = self.inputdesc.type.imageType.width
self.input_name = self.model.get_spec().description.input[0].name
labels_contents = open(labelsFile, "r").read()
self.labels = parse_label_contents(labels_contents)
# csv in mobilenet model
# self.modelspec.description.metadata.userDefined['classes']
self.labels = parse_labels(self.modelspec.description.metadata.userDefined)
self.loop = asyncio.get_event_loop()
self.minThreshold = 0.2
asyncio.ensure_future(self.prepareRecognitionModels(), loop=self.loop)
async def prepareRecognitionModels(self):
try:
await scrypted_sdk.deviceManager.onDevicesChanged(
{
"devices": [
{
"nativeId": "recognition",
"type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
"interfaces": [
scrypted_sdk.ScryptedInterface.ObjectDetection.value,
],
"name": "CoreML Recognition",
}
]
}
)
except:
pass
async def getDevice(self, nativeId: str) -> Any:
return CoreMLRecognition(nativeId)
async def getSettings(self) -> list[Setting]:
model = self.storage.getItem("model") or "Default"
return [
@@ -114,14 +158,7 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
"key": "model",
"title": "Model",
"description": "The detection model used to find objects.",
"choices": [
"Default",
"ssdlite_mobilenet_v2",
"yolov4-tiny",
"yolov8n",
"yolov8n_320",
"yolov9c_320",
],
"choices": availableModels,
"value": model,
},
]
@@ -137,23 +174,23 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
def get_input_size(self) -> Tuple[float, float]:
return (self.inputwidth, self.inputheight)
async def detect_batch(self, inputs: List[Any]) -> List[Any]:
out_dicts = await asyncio.get_event_loop().run_in_executor(
predictExecutor, lambda: self.model.predict(inputs)
)
return out_dicts
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
objs = []
# run in executor if this is the plugin loop
if self.yolo:
input_name = "image" if self.yolov8 or self.yolov9 else "input_1"
if asyncio.get_event_loop() is self.loop:
out_dict = await asyncio.get_event_loop().run_in_executor(
predictExecutor, lambda: self.model.predict({input_name: input})
)
else:
out_dict = self.model.predict({input_name: input})
out_dict = await self.queue_batch({self.input_name: input})
if self.yolov8 or self.yolov9:
if self.scrypted_yolo:
results = list(out_dict.values())[0][0]
objs = yolo.parse_yolov8(results)
objs = yolo.parse_yolov9(results)
ret = self.create_detection_result(objs, src_size, cvss)
return ret
@@ -187,17 +224,12 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
ret = self.create_detection_result(objs, src_size, cvss)
return ret
if asyncio.get_event_loop() is self.loop:
out_dict = await asyncio.get_event_loop().run_in_executor(
predictExecutor,
lambda: self.model.predict(
{"image": input, "confidenceThreshold": self.minThreshold}
),
)
else:
out_dict = self.model.predict(
out_dict = await asyncio.get_event_loop().run_in_executor(
predictExecutor,
lambda: self.model.predict(
{"image": input, "confidenceThreshold": self.minThreshold}
)
),
)
coordinatesList = out_dict["coordinates"].astype(float)

View File

@@ -0,0 +1,132 @@
from __future__ import annotations
import concurrent.futures
import os
import coremltools as ct
import numpy as np
# import Quartz
# from Foundation import NSData, NSMakeSize
# import Vision
from predict.recognize import RecognizeDetection
def euclidean_distance(arr1, arr2):
return np.linalg.norm(arr1 - arr2)
def cosine_similarity(vector_a, vector_b):
dot_product = np.dot(vector_a, vector_b)
norm_a = np.linalg.norm(vector_a)
norm_b = np.linalg.norm(vector_b)
similarity = dot_product / (norm_a * norm_b)
return similarity
predictExecutor = concurrent.futures.ThreadPoolExecutor(8, "Vision-Predict")
class CoreMLRecognition(RecognizeDetection):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
def downloadModel(self, model: str):
model_version = "v7"
mlmodel = "model"
files = [
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/weights/weight.bin",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/{mlmodel}.mlmodel",
f"{model}/{model}.mlpackage/Manifest.json",
]
for f in files:
p = self.downloadFile(
f"https://github.com/koush/coreml-models/raw/main/{f}",
f"{model_version}/{f}",
)
modelFile = os.path.dirname(p)
model = ct.models.MLModel(modelFile)
inputName = model.get_spec().description.input[0].name
return model, inputName
def predictDetectModel(self, input):
model, inputName = self.detectModel
out_dict = model.predict({inputName: input})
results = list(out_dict.values())[0][0]
return results
def predictFaceModel(self, input):
model, inputName = self.faceModel
out_dict = model.predict({inputName: input})
return out_dict["var_2167"][0]
def predictTextModel(self, input):
model, inputName = self.textModel
out_dict = model.predict({inputName: input})
preds = out_dict["linear_2"]
return preds
# def predictVision(self, input: Image.Image) -> asyncio.Future[list[Prediction]]:
# buffer = input.tobytes()
# myData = NSData.alloc().initWithBytes_length_(buffer, len(buffer))
# input_image = (
# Quartz.CIImage.imageWithBitmapData_bytesPerRow_size_format_options_(
# myData,
# 4 * input.width,
# NSMakeSize(input.width, input.height),
# Quartz.kCIFormatRGBA8,
# None,
# )
# )
# request_handler = Vision.VNImageRequestHandler.alloc().initWithCIImage_options_(
# input_image, None
# )
# loop = self.loop
# future = loop.create_future()
# def detect_face_handler(request, error):
# observations = request.results()
# if error:
# loop.call_soon_threadsafe(future.set_exception, Exception())
# else:
# objs = []
# for o in observations:
# confidence = o.confidence()
# bb = o.boundingBox()
# origin = bb.origin
# size = bb.size
# l = origin.x * input.width
# t = (1 - origin.y - size.height) * input.height
# w = size.width * input.width
# h = size.height * input.height
# prediction = Prediction(
# 0, confidence, from_bounding_box((l, t, w, h))
# )
# objs.append(prediction)
# loop.call_soon_threadsafe(future.set_result, objs)
# request = (
# Vision.VNDetectFaceRectanglesRequest.alloc().initWithCompletionHandler_(
# detect_face_handler
# )
# )
# error = request_handler.performRequests_error_([request], None)
# return future
# async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
# future = await asyncio.get_event_loop().run_in_executor(
# predictExecutor,
# lambda: self.predictVision(input),
# )
# objs = await future
# ret = self.create_detection_result(objs, src_size, cvss)
# return ret

View File

@@ -1,6 +1,2 @@
#
coremltools==7.1
# pillow for anything not intel linux, pillow-simd is available on x64 linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
Pillow>=5.4.1

View File

@@ -1 +0,0 @@
../../openvino/src/yolo

View File

@@ -1,22 +1,23 @@
{
"name": "@scrypted/hikvision",
"version": "0.0.137",
"version": "0.0.147",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/hikvision",
"version": "0.0.137",
"version": "0.0.147",
"license": "Apache",
"dependencies": {
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk",
"@types/xml2js": "^0.4.11",
"lodash": "^4.17.21",
"xml2js": "^0.6.0"
"@types/xml2js": "^0.4.14",
"content-type": "^1.0.5",
"xml2js": "^0.6.2"
},
"devDependencies": {
"@types/node": "^18.15.11"
"@types/content-type": "^1.1.8",
"@types/node": "^20.11.30"
}
},
"../../common": {
@@ -27,17 +28,16 @@
"@scrypted/sdk": "file:../sdk",
"@scrypted/server": "file:../server",
"http-auth-utils": "^5.0.1",
"node-fetch-commonjs": "^3.1.1",
"typescript": "^5.3.3"
},
"devDependencies": {
"@types/node": "^20.10.8",
"@types/node": "^20.11.0",
"ts-node": "^10.9.2"
}
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.3.4",
"version": "0.3.29",
"license": "ISC",
"dependencies": {
"@babel/preset-typescript": "^7.18.6",
@@ -83,33 +83,50 @@
"resolved": "../../sdk",
"link": true
},
"node_modules/@types/content-type": {
"version": "1.1.8",
"resolved": "https://registry.npmjs.org/@types/content-type/-/content-type-1.1.8.tgz",
"integrity": "sha512-1tBhmVUeso3+ahfyaKluXe38p+94lovUZdoVfQ3OnJo9uJC42JT7CBoN3k9HYhAae+GwiBYmHu+N9FZhOG+2Pg==",
"dev": true
},
"node_modules/@types/node": {
"version": "18.15.11",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.11.tgz",
"integrity": "sha512-E5Kwq2n4SbMzQOn6wnmBjuK9ouqlURrcZDVfbo9ftDDTFt3nk7ZKK4GMOzoYgnpQJKcxwQw+lGaBvvlMo0qN/Q=="
"version": "20.11.30",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
"integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/@types/xml2js": {
"version": "0.4.11",
"resolved": "https://registry.npmjs.org/@types/xml2js/-/xml2js-0.4.11.tgz",
"integrity": "sha512-JdigeAKmCyoJUiQljjr7tQG3if9NkqGUgwEUqBvV0N7LM4HyQk7UXCnusRa1lnvXAEYJ8mw8GtZWioagNztOwA==",
"version": "0.4.14",
"resolved": "https://registry.npmjs.org/@types/xml2js/-/xml2js-0.4.14.tgz",
"integrity": "sha512-4YnrRemBShWRO2QjvUin8ESA41rH+9nQGLUGZV/1IDhi3SL9OhdpNC/MrulTWuptXKwhx/aDxE7toV0f/ypIXQ==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
"node_modules/content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/sax": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
"integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw=="
},
"node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="
},
"node_modules/xml2js": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.0.tgz",
"integrity": "sha512-eLTh0kA8uHceqesPqSE+VvO1CDDJWMwlQfB6LuN6T8w6MaDJ8Txm8P7s5cHD0miF0V+GGTZrDQfxPZQVsur33w==",
"version": "0.6.2",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.2.tgz",
"integrity": "sha512-T4rieHaC1EXcES0Kxxj4JWgaUQHDk+qwHcYOCFHfiwKz7tOVPLq7Hjq9dM1WCMhylqMEfP7hMcOIChvotiZegA==",
"dependencies": {
"sax": ">=0.6.0",
"xmlbuilder": "~11.0.0"
@@ -133,9 +150,8 @@
"requires": {
"@scrypted/sdk": "file:../sdk",
"@scrypted/server": "file:../server",
"@types/node": "^20.10.8",
"@types/node": "^20.11.0",
"http-auth-utils": "^5.0.1",
"node-fetch-commonjs": "^3.1.1",
"ts-node": "^10.9.2",
"typescript": "^5.3.3"
}
@@ -164,33 +180,47 @@
"webpack-bundle-analyzer": "^4.5.0"
}
},
"@types/content-type": {
"version": "1.1.8",
"resolved": "https://registry.npmjs.org/@types/content-type/-/content-type-1.1.8.tgz",
"integrity": "sha512-1tBhmVUeso3+ahfyaKluXe38p+94lovUZdoVfQ3OnJo9uJC42JT7CBoN3k9HYhAae+GwiBYmHu+N9FZhOG+2Pg==",
"dev": true
},
"@types/node": {
"version": "18.15.11",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.11.tgz",
"integrity": "sha512-E5Kwq2n4SbMzQOn6wnmBjuK9ouqlURrcZDVfbo9ftDDTFt3nk7ZKK4GMOzoYgnpQJKcxwQw+lGaBvvlMo0qN/Q=="
"version": "20.11.30",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
"integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
"requires": {
"undici-types": "~5.26.4"
}
},
"@types/xml2js": {
"version": "0.4.11",
"resolved": "https://registry.npmjs.org/@types/xml2js/-/xml2js-0.4.11.tgz",
"integrity": "sha512-JdigeAKmCyoJUiQljjr7tQG3if9NkqGUgwEUqBvV0N7LM4HyQk7UXCnusRa1lnvXAEYJ8mw8GtZWioagNztOwA==",
"version": "0.4.14",
"resolved": "https://registry.npmjs.org/@types/xml2js/-/xml2js-0.4.14.tgz",
"integrity": "sha512-4YnrRemBShWRO2QjvUin8ESA41rH+9nQGLUGZV/1IDhi3SL9OhdpNC/MrulTWuptXKwhx/aDxE7toV0f/ypIXQ==",
"requires": {
"@types/node": "*"
}
},
"lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
"content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA=="
},
"sax": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
"integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw=="
},
"undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="
},
"xml2js": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.0.tgz",
"integrity": "sha512-eLTh0kA8uHceqesPqSE+VvO1CDDJWMwlQfB6LuN6T8w6MaDJ8Txm8P7s5cHD0miF0V+GGTZrDQfxPZQVsur33w==",
"version": "0.6.2",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.2.tgz",
"integrity": "sha512-T4rieHaC1EXcES0Kxxj4JWgaUQHDk+qwHcYOCFHfiwKz7tOVPLq7Hjq9dM1WCMhylqMEfP7hMcOIChvotiZegA==",
"requires": {
"sax": ">=0.6.0",
"xmlbuilder": "~11.0.0"

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/hikvision",
"version": "0.0.137",
"version": "0.0.147",
"description": "Hikvision Plugin for Scrypted",
"author": "Scrypted",
"license": "Apache",
@@ -37,11 +37,12 @@
"dependencies": {
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk",
"@types/xml2js": "^0.4.11",
"lodash": "^4.17.21",
"xml2js": "^0.6.0"
"@types/xml2js": "^0.4.14",
"content-type": "^1.0.5",
"xml2js": "^0.6.2"
},
"devDependencies": {
"@types/node": "^18.15.11"
"@types/content-type": "^1.1.8",
"@types/node": "^20.11.30"
}
}

View File

@@ -1,8 +1,17 @@
import { AuthFetchCredentialState, HttpFetchOptions, authHttpFetch } from '@scrypted/common/src/http-auth-fetch';
import { readLine } from '@scrypted/common/src/read-stream';
import { parseHeaders, readBody, readMessage } from '@scrypted/common/src/rtsp-server';
import contentType from 'content-type';
import { IncomingMessage } from 'http';
import { Readable } from 'stream';
import { EventEmitter, Readable } from 'stream';
import { Destroyable } from '../../rtsp/src/rtsp';
import { getDeviceInfo } from './probe';
export const detectionMap = {
human: 'person',
vehicle: 'car',
}
export function getChannel(channel: string) {
return channel || '101';
}
@@ -15,6 +24,8 @@ export enum HikvisionCameraEvent {
// <eventType>linedetection</eventType>
// <eventState>inactive</eventState>
LineDetection = "<eventType>linedetection</eventType>",
RegionEntrance = "<eventType>regionEntrance</eventType>",
RegionExit = "<eventType>regionExit</eventType>",
// <eventType>fielddetection</eventType>
// <eventState>active</eventState>
// <eventType>fielddetection</eventType>
@@ -31,7 +42,7 @@ export interface HikvisionCameraStreamSetup {
export class HikvisionCameraAPI {
credential: AuthFetchCredentialState;
deviceModel: Promise<string>;
listenerPromise: Promise<IncomingMessage>;
listenerPromise: Promise<Destroyable>;
constructor(public ip: string, username: string, password: string, public console: Console) {
this.credential = {
@@ -129,35 +140,106 @@ export class HikvisionCameraAPI {
return response.body;
}
async listenEvents() {
async listenEvents(): Promise<Destroyable> {
const events = new EventEmitter();
(events as any).destroy = () => { };
// support multiple cameras listening to a single stream
if (!this.listenerPromise) {
const url = `http://${this.ip}/ISAPI/Event/notification/alertStream`;
let lastSmartDetection: string;
this.listenerPromise = this.request({
url,
responseType: 'readable',
}).then(response => {
const stream = response.body;
const stream: IncomingMessage = response.body;
(events as any).destroy = () => {
stream.destroy();
events.removeAllListeners();
};
stream.on('close', () => {
this.listenerPromise = undefined;
events.emit('close');
});
stream.on('end', () => {
this.listenerPromise = undefined;
events.emit('end');
});
stream.on('error', e => {
this.listenerPromise = undefined;
events.emit('error', e);
});
stream.socket.setKeepAlive(true);
stream.on('data', (buffer: Buffer) => {
const data = buffer.toString();
for (const event of Object.values(HikvisionCameraEvent)) {
if (data.indexOf(event) !== -1) {
const cameraNumber = data.match(/<channelID>(.*?)</)?.[1] || data.match(/<dynChannelID>(.*?)</)?.[1];
const inactive = data.indexOf('<eventState>inactive</eventState>') !== -1;
stream.emit('event', event, cameraNumber, inactive, data);
const ct = stream.headers['content-type'];
// parse the multipart boundary out of the content-type header
const cd = contentType.parse(ct);
let { boundary } = cd.parameters;
boundary = `--${boundary}`;
const boundaryEnd = `${boundary}--`;
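// the alert stream is multipart/mixed: each part begins with the boundary
// line, followed by MIME headers, a blank line, and the part body.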
(async () => {
while (true) {
let ignore = await readLine(stream);
ignore = ignore.trim();
if (!ignore)
continue;
if (ignore === boundaryEnd)
continue;
if (ignore !== boundary) {
this.console.error('expected boundary but found', ignore);
throw new Error('expected boundary');
}
const message = await readMessage(stream);
events.emit('data', message);
message.unshift('');
const headers = parseHeaders(message);
const body = await readBody(stream, headers);
try {
if (!headers['content-type'].includes('application/xml') && lastSmartDetection) {
if (!headers['content-type']?.startsWith('image/jpeg')) {
continue;
}
events.emit('smart', lastSmartDetection, body);
lastSmartDetection = undefined;
continue;
}
}
finally {
// is it possible that smart detections are sent without images?
// if so, flush this detection.
if (lastSmartDetection) {
events.emit('smart', lastSmartDetection);
}
}
const data = body.toString();
events.emit('data', data);
for (const event of Object.values(HikvisionCameraEvent)) {
if (data.indexOf(event) !== -1) {
const cameraNumber = data.match(/<channelID>(.*?)</)?.[1] || data.match(/<dynChannelID>(.*?)</)?.[1];
const inactive = data.indexOf('<eventState>inactive</eventState>') !== -1;
events.emit('event', event, cameraNumber, inactive, data);
if (event === HikvisionCameraEvent.LineDetection
|| event === HikvisionCameraEvent.RegionEntrance
|| event === HikvisionCameraEvent.RegionExit
|| event === HikvisionCameraEvent.FieldDetection) {
lastSmartDetection = data;
}
}
}
}
});
return stream;
})()
.catch(() => stream.destroy());
return events as any as Destroyable;
});
this.listenerPromise.catch(() => this.listenerPromise = undefined);
this.listenerPromise.then(stream => {
stream.on('close', () => this.listenerPromise = undefined);
stream.on('end', () => this.listenerPromise = undefined);
});
}
return this.listenerPromise;

View File

@@ -1,11 +1,12 @@
import sdk, { Camera, DeviceCreatorSettings, DeviceInformation, FFmpegInput, Intercom, MediaObject, MediaStreamOptions, Reboot, RequestPictureOptions, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting } from "@scrypted/sdk";
import sdk, { Camera, DeviceCreatorSettings, DeviceInformation, FFmpegInput, Intercom, MediaObject, MediaStreamOptions, ObjectDetectionResult, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, Reboot, RequestPictureOptions, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting } from "@scrypted/sdk";
import crypto from 'crypto';
import { PassThrough } from "stream";
import xml2js from 'xml2js';
import { RtpPacket } from '../../../external/werift/packages/rtp/src/rtp/rtp';
import { OnvifIntercom } from "../../onvif/src/onvif-intercom";
import { RtspProvider, RtspSmartCamera, UrlMediaStreamOptions } from "../../rtsp/src/rtsp";
import { startRtpForwarderProcess } from '../../webrtc/src/rtp-forwarders';
import { HikvisionCameraAPI, HikvisionCameraEvent } from "./hikvision-camera-api";
import { HikvisionCameraAPI, HikvisionCameraEvent, detectionMap } from "./hikvision-camera-api";
const { mediaManager } = sdk;
@@ -15,15 +16,17 @@ function channelToCameraNumber(channel: string) {
return channel.substring(0, channel.length - 2);
}
class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboot {
class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboot, ObjectDetector {
detectedChannels: Promise<Map<string, MediaStreamOptions>>;
client: HikvisionCameraAPI;
onvifIntercom = new OnvifIntercom(this);
activeIntercom: Awaited<ReturnType<typeof startRtpForwarderProcess>>;
hasSmartDetection: boolean;
constructor(nativeId: string, provider: RtspProvider) {
super(nativeId, provider);
this.hasSmartDetection = this.storage.getItem('hasSmartDetection') === 'true';
this.updateDevice();
this.updateDeviceInfo();
}
@@ -63,41 +66,52 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
let ignoreCameraNumber: boolean;
const motionTimeoutDuration = 20000;
// check if the camera+channel field is in use, and filter events.
const checkCameraNumber = async (cameraNumber: string) => {
// check if the camera+channel field is in use, and filter events.
if (this.getRtspChannel()) {
// it is possible to set it up to use a camera number
// on an nvr IP (which gives RTSP urls through the NVR), but then use an http port
// that gives a filtered event stream from only that camera.
// in this case, the camera numbers will not
// match, as they will always be "1".
// to detect that a camera specific endpoint is being used,
// look at the channel ids and see if that camera number is found.
// this is different from the use case where the NVR or camera
// is using a port other than 80 (the default).
// could add a setting to have the user explicitly denote nvr usage,
// but that is error prone.
const userCameraNumber = this.getCameraNumber();
if (ignoreCameraNumber === undefined && this.detectedChannels) {
const channelIds = (await this.detectedChannels).keys();
ignoreCameraNumber = true;
for (const id of channelIds) {
if (channelToCameraNumber(id) === userCameraNumber) {
ignoreCameraNumber = false;
break;
}
}
}
if (!ignoreCameraNumber && cameraNumber !== userCameraNumber) {
// this.console.error(`### Skipping motion event ${cameraNumber} != ${this.getCameraNumber()}`);
return false;
}
}
return true;
};
events.on('event', async (event: HikvisionCameraEvent, cameraNumber: string, inactive: boolean) => {
if (event === HikvisionCameraEvent.MotionDetected
|| event === HikvisionCameraEvent.LineDetection
|| event === HikvisionCameraEvent.RegionEntrance
|| event === HikvisionCameraEvent.RegionExit
|| event === HikvisionCameraEvent.FieldDetection) {
// check if the camera+channel field is in use, and filter events.
if (this.getRtspChannel()) {
// it is possible to set it up to use a camera number
// on an nvr IP (which gives RTSP urls through the NVR), but then use an http port
// that gives a filtered event stream from only that camera.
// in this case, the camera numbers will not
// match, as they will always be "1".
// to detect that a camera specific endpoint is being used,
// look at the channel ids and see if that camera number is found.
// this is different from the use case where the NVR or camera
// is using a port other than 80 (the default).
// could add a setting to have the user explicitly denote nvr usage,
// but that is error prone.
const userCameraNumber = this.getCameraNumber();
if (ignoreCameraNumber === undefined && this.detectedChannels) {
const channelIds = (await this.detectedChannels).keys();
ignoreCameraNumber = true;
for (const id of channelIds) {
if (channelToCameraNumber(id) === userCameraNumber) {
ignoreCameraNumber = false;
break;
}
}
}
if (!ignoreCameraNumber && cameraNumber !== userCameraNumber) {
// this.console.error(`### Skipping motion event ${cameraNumber} != ${this.getCameraNumber()}`);
return;
}
}
if (!await checkCameraNumber(cameraNumber))
return;
this.motionDetected = true;
clearTimeout(motionTimeout);
@@ -106,11 +120,107 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
this.motionDetected = false;
}, motionTimeoutDuration);
}
})
});
let inputDimensions: [number, number];
events.on('smart', async (data: string, image: Buffer) => {
if (!this.hasSmartDetection) {
this.hasSmartDetection = true;
this.storage.setItem('hasSmartDetection', 'true');
this.updateDevice();
}
const xml = await xml2js.parseStringPromise(data);
const [channelId] = xml.EventNotificationAlert.channelID;
if (!await checkCameraNumber(channelId)) {
this.console.warn('channel check failed');
return;
}
const now = Date.now();
let detections: ObjectDetectionResult[] = xml.EventNotificationAlert?.DetectionRegionList?.map(region => {
const { DetectionRegionEntry } = region;
const dre = DetectionRegionEntry[0];
if (!DetectionRegionEntry)
return;
const { detectionTarget } = dre;
// const { TargetRect } = dre;
// const { X, Y, width, height } = TargetRect[0];
const [name] = detectionTarget;
return {
score: 1,
className: detectionMap[name] || name,
// boundingBox: [
// parseInt(X),
// parseInt(Y),
// parseInt(width),
// parseInt(height),
// ],
// movement: {
// moving: true,
// firstSeen: now,
// lastSeen: now,
// }
} as ObjectDetectionResult;
});
detections = detections?.filter(d => d);
if (!detections?.length)
return;
// if (inputDimensions === undefined && loadSharp()) {
// try {
// const { image: i, metadata } = await loadVipsMetadata(image);
// i.destroy();
// inputDimensions = [metadata.width, metadata.height];
// }
// catch (e) {
// inputDimensions = null;
// }
// finally {
// }
// }
let detectionId: string;
if (image) {
detectionId = crypto.randomBytes(4).toString('hex');
this.recentDetections.set(detectionId, image);
setTimeout(() => this.recentDetections.delete(detectionId), 10000);
}
const detected: ObjectsDetected = {
inputDimensions,
detectionId,
timestamp: now,
detections,
};
this.onDeviceEvent(ScryptedInterface.ObjectDetector, detected);
});
return events;
}
recentDetections = new Map<string, Buffer>();
async getDetectionInput(detectionId: string, eventId?: any): Promise<MediaObject> {
const image = this.recentDetections.get(detectionId);
if (!image)
return;
return mediaManager.createMediaObject(image, 'image/jpeg');
}
async getObjectTypes(): Promise<ObjectDetectionTypes> {
return {
classes: [
...Object.values(detectionMap),
]
}
}
createClient() {
return new HikvisionCameraAPI(this.getHttpAddress(), this.getUsername(), this.getPassword(), this.console);
}
@@ -284,6 +394,9 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
interfaces.push(ScryptedInterface.Intercom);
}
if (this.hasSmartDetection)
interfaces.push(ScryptedInterface.ObjectDetector);
this.provider.updateDevice(this.nativeId, this.name, interfaces, type);
}
@@ -408,7 +521,7 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
const put = this.getClient().request({
url,
method: 'PUT',
responseType: 'readable',
responseType: 'text',
headers: {
'Content-Type': 'application/octet-stream',
// 'Connection': 'close',
@@ -440,6 +553,12 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
forwarder.killPromise.finally(() => {
this.console.log('audio finished');
passthrough.end();
setTimeout(() => {
this.stopIntercom();
}, 1000);
});
put.finally(() => {
this.stopIntercom();
});
@@ -448,7 +567,7 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
if (response.statusCode !== 200)
forwarder.kill();
})
.catch(() => forwarder.kill());
.catch(() => forwarder.kill());
}
async stopIntercom(): Promise<void> {
@@ -581,4 +700,4 @@ class HikvisionProvider extends RtspProvider {
}
}
export default new HikvisionProvider();
export default HikvisionProvider;

View File

@@ -1,4 +1,4 @@
{
"scrypted.debugHost": "scrypted-server"
"scrypted.debugHost": "127.0.0.1"
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/homekit",
"version": "1.2.43",
"version": "1.2.54",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/homekit",
"version": "1.2.43",
"version": "1.2.54",
"dependencies": {
"@koush/werift-src": "file:../../external/werift",
"check-disk-space": "^3.4.0",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/homekit",
"version": "1.2.43",
"version": "1.2.54",
"description": "HomeKit Plugin for Scrypted",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",

View File

@@ -66,7 +66,7 @@ export function createHAPUsername() {
}
export function getAddresses() {
const addresses = Object.entries(os.networkInterfaces()).filter(([iface]) => iface.startsWith('en') || iface.startsWith('eth') || iface.startsWith('wlan')).map(([_, addr]) => addr).flat().map(info => info.address).filter(address => address);
const addresses = Object.entries(os.networkInterfaces()).filter(([iface]) => iface.startsWith('en') || iface.startsWith('eth') || iface.startsWith('wlan') || iface.startsWith('net')).map(([_, addr]) => addr).flat().map(info => info.address).filter(address => address);
return addresses;
}
@@ -74,13 +74,17 @@ export function getRandomPort() {
return Math.round(30000 + Math.random() * 20000);
}
export function createHAPUsernameStorageSettingsDict(device: { storage: Storage, name?: string }, group: string, subgroup?: string): StorageSettingsDict<'mac' | 'qrCode' | 'pincode' | 'portOverride' | 'resetAccessory'> {
export function createHAPUsernameStorageSettingsDict(device: { storage: Storage, name?: string }, group: string, subgroup?: string): StorageSettingsDict<'mac' | 'addIdentifyingMaterial' | 'qrCode' | 'pincode' | 'portOverride' | 'resetAccessory'> {
const alertReload = () => {
sdk.log.a(`The HomeKit plugin will reload momentarily for the changes to ${device.name} to take effect.`);
sdk.deviceManager.requestRestart();
}
return {
addIdentifyingMaterial: {
hide: true,
type: 'boolean',
},
qrCode: {
group,
// subgroup,

View File

@@ -267,6 +267,9 @@ export class HomeKitPlugin extends ScryptedDeviceBase implements MixinProvider,
},
undefined, 'Pairing'));
storageSettings.settings.pincode.persistedDefaultValue = randomPinCode();
// TODO: change this value after the current default has been persisted to existing clients.
// changing it now will cause existing accessories to be renamed.
storageSettings.settings.addIdentifyingMaterial.persistedDefaultValue = false;
const mixinConsole = deviceManager.getMixinConsole(device.id, this.nativeId);
@@ -277,7 +280,7 @@ export class HomeKitPlugin extends ScryptedDeviceBase implements MixinProvider,
published = true;
mixinConsole.log('Device is in accessory mode and is online. HomeKit services are being published.');
await this.publishAccessory(accessory, storageSettings.values.mac, storageSettings.values.pincode, standaloneCategory, storageSettings.values.portOverride);
await this.publishAccessory(accessory, storageSettings.values.mac, storageSettings.values.pincode, standaloneCategory, storageSettings.values.portOverride, storageSettings.values.addIdentifyingMaterial);
if (!hasPublished) {
hasPublished = true;
storageSettings.values.qrCode = accessory.setupURI();
@@ -420,7 +423,7 @@ export class HomeKitPlugin extends ScryptedDeviceBase implements MixinProvider,
return bind;
}
async publishAccessory(accessory: Accessory, username: string, pincode: string, category: Categories, port: number) {
async publishAccessory(accessory: Accessory, username: string, pincode: string, category: Categories, port: number, addIdentifyingMaterial: boolean) {
const bind = await this.getAdvertiserInterfaceBind();
await accessory.publish({
@@ -428,7 +431,7 @@ export class HomeKitPlugin extends ScryptedDeviceBase implements MixinProvider,
port,
pincode,
category,
addIdentifyingMaterial: false,
addIdentifyingMaterial,
advertiser: this.getAdvertiser(),
bind,
});

View File

@@ -103,24 +103,22 @@ addSupportedType({
const isRecordingEnabled = device.interfaces.includes(ScryptedInterface.MotionSensor);
let configuration: CameraRecordingConfiguration;
const openRecordingStreams = new Map<number, Deferred<any>>();
const openRecordingStreams = new Map<number, AsyncGenerator<RecordingPacket>>();
if (isRecordingEnabled) {
recordingDelegate = {
updateRecordingConfiguration(newConfiguration: CameraRecordingConfiguration) {
configuration = newConfiguration;
},
handleRecordingStreamRequest(streamId: number): AsyncGenerator<RecordingPacket> {
const ret = handleFragmentsRequests(streamId, device, configuration, console, homekitPlugin);
const d = new Deferred<any>();
d.promise.then(reason => {
ret.throw(new Error(reason.toString()));
openRecordingStreams.delete(streamId);
});
openRecordingStreams.set(streamId, d);
const ret = handleFragmentsRequests(streamId, device, configuration, console, homekitPlugin,
() => openRecordingStreams.has(streamId));
openRecordingStreams.set(streamId, ret);
return ret;
},
closeRecordingStream(streamId, reason) {
openRecordingStreams.get(streamId)?.resolve(reason);
const r = openRecordingStreams.get(streamId);
r?.throw(new Error(reason?.toString()));
openRecordingStreams.delete(streamId);
},
updateRecordingActive(active) {
},

View File

@@ -67,12 +67,29 @@ async function checkMp4StartsWithKeyFrame(console: Console, mp4: Buffer) {
await timeoutPromise(1000, new Promise(resolve => cp.on('exit', resolve)));
const h264 = Buffer.concat(buffers);
let offset = 0;
let countedZeroes = 0;
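// scan for an Annex-B start code (two or more 0x00 bytes followed by 0x01),
// then inspect the NALU type byte that follows it.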
while (offset < h264.length - 6) {
if (h264.readInt32BE(offset) !== 1) {
const byte = h264[offset];
if (byte === 0) {
countedZeroes = Math.min(4, countedZeroes + 1);
offset++;
continue;
}
offset += 4;
if (countedZeroes < 2) {
countedZeroes = 0;
offset++
continue;
}
countedZeroes = 0;
if (byte !== 1) {
offset++;
continue;
}
offset++;
let naluType = h264.readUInt8(offset) & 0x1f;
if (naluType === NAL_TYPE_FU_A) {
offset++;
@@ -99,7 +116,7 @@ async function checkMp4StartsWithKeyFrame(console: Console, mp4: Buffer) {
}
export async function* handleFragmentsRequests(streamId: number, device: ScryptedDevice & VideoCamera & MotionSensor & AudioSensor,
configuration: CameraRecordingConfiguration, console: Console, homekitPlugin: HomeKitPlugin): AsyncGenerator<RecordingPacket> {
configuration: CameraRecordingConfiguration, console: Console, homekitPlugin: HomeKitPlugin, isOpen: () => boolean): AsyncGenerator<RecordingPacket> {
// homekitPlugin.storageSettings.values.lastKnownHomeHub = connection.remoteAddress;
@@ -177,7 +194,7 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
}
let audioArgs: string[];
if (transcodeRecording || isDefinitelyNotAAC || debugMode.audio) {
if (!noAudio && (transcodeRecording || isDefinitelyNotAAC || debugMode.audio)) {
if (!(transcodeRecording || debugMode.audio))
console.warn('Recording audio is not explicitly AAC, forcing transcoding. Setting audio output to AAC is recommended.', audioCodec);
@@ -302,6 +319,7 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
let needSkip = true;
let ftyp: Buffer[];
let moov: Buffer[];
for await (const box of generator) {
const { header, type, data } = box;
// console.log('motion fragment box', type);
@@ -314,7 +332,7 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
checkMp4 = false;
// pending will contain the moof
try {
if (!await checkMp4StartsWithKeyFrame(console, Buffer.concat([...ftyp, ...moov, ...pending, header, data]))) {
if (false && !await checkMp4StartsWithKeyFrame(console, Buffer.concat([...ftyp, ...moov, ...pending, header, data]))) {
needSkip = false;
pending = [];
continue;
@@ -343,17 +361,19 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
data: fragment,
isLast,
}
if (!isOpen())
return;
yield recordingPacket;
if (wasLast)
break;
}
}
console.log(`motion recording finished`);
}
catch (e) {
console.log(`motion recording completed ${e}`);
}
finally {
console.log(`motion recording finished`);
clearTimeout(videoTimeout);
cleanupPipes();
recordingFile?.end();

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/mqtt",
"version": "0.0.77",
"version": "0.0.80",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/mqtt",
"version": "0.0.77",
"version": "0.0.80",
"dependencies": {
"aedes": "^0.46.1",
"axios": "^0.23.0",

View File

@@ -41,5 +41,5 @@
"@types/node": "^18.4.2",
"@types/nunjucks": "^3.2.0"
},
"version": "0.0.77"
"version": "0.0.80"
}

View File

@@ -6,6 +6,7 @@ import nunjucks from 'nunjucks';
import sdk from "@scrypted/sdk";
import type { MqttProvider } from './main';
import { getHsvFromXyColor, getXyYFromHsvColor } from './color-util';
import { MqttEvent } from './api/mqtt-client';
const { deviceManager } = sdk;
@@ -25,7 +26,7 @@ typeMap.set('light', {
const interfaces = [ScryptedInterface.OnOff, ScryptedInterface.Brightness];
if (config.color_mode) {
config.supported_color_modes.forEach(color_mode => {
if (color_mode === 'xy')
if (color_mode === 'xy')
interfaces.push(ScryptedInterface.ColorSettingHsv);
else if (color_mode === 'hs')
interfaces.push(ScryptedInterface.ColorSettingHsv);
@@ -246,7 +247,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
return;
}
this.debounceCallbacks = new Map<string, Set<(payload: Buffer) => void>>();
this.debounceCallbacks = new Map<string, Set<(payload: Buffer) => void>>();
const { client } = provider;
client.on('message', this.listener.bind(this));
@@ -297,7 +298,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
this.console.log('binding...');
const { client } = this.provider;
this.debounceCallbacks = new Map<string, Set<(payload: Buffer) => void>>();
this.debounceCallbacks = new Map<string, Set<(payload: Buffer) => void>>();
if (this.providedInterfaces.includes(ScryptedInterface.Online)) {
const config = this.loadComponentConfig(ScryptedInterface.Online);
@@ -468,7 +469,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.command_off_template,
command, "ON");
} else {
this.publishValue(config.command_topic,
this.publishValue(config.command_topic,
undefined, command, command);
}
}
@@ -489,7 +490,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.command_on_template,
command, "ON");
} else {
this.publishValue(config.command_topic,
this.publishValue(config.command_topic,
undefined, command, command);
}
}
@@ -506,8 +507,8 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.brightness_value_template,
scaledBrightness, scaledBrightness);
} else {
this.publishValue(config.command_topic,
`{ "state": "${ scaledBrightness === 0 ? 'OFF' : 'ON'}", "brightness": ${scaledBrightness} }`,
this.publishValue(config.command_topic,
`{ "state": "${scaledBrightness === 0 ? 'OFF' : 'ON'}", "brightness": ${scaledBrightness} }`,
scaledBrightness, 255);
}
}
@@ -525,7 +526,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
if (kelvin >= 0 || kelvin <= 100) {
const min = await this.getTemperatureMinK();
const max = await this.getTemperatureMaxK();
const diff = (max - min) * (kelvin/100);
const diff = (max - min) * (kelvin / 100);
kelvin = Math.round(min + diff);
}
@@ -542,7 +543,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.color_temp_command_template,
color, color);
} else {
this.publishValue(config.command_topic,
this.publishValue(config.command_topic,
undefined, color, color);
}
}
@@ -567,7 +568,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.hs_command_template,
color, color);
} else {
this.publishValue(config.command_topic,
this.publishValue(config.command_topic,
undefined, color, color);
}
} else if (this.colorMode === "xy") {
@@ -589,12 +590,12 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.xy_command_template,
color, color);
} else {
this.publishValue(config.command_topic,
this.publishValue(config.command_topic,
undefined, color, color);
}
}
}
}
async lock(): Promise<void> {
const config = this.loadComponentConfig(ScryptedInterface.Lock);
return this.publishValue(config.command_topic,
@@ -610,6 +611,9 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
interface AutoDiscoveryConfig {
component: string;
create: (mqttId: string, device: MixinDeviceBase<any>, topic: string) => any;
subscriptions?: {
[topic: string]: (device: MixinDeviceBase<any>, event: MqttEvent) => void;
}
}
const autoDiscoveryMap = new Map<string, AutoDiscoveryConfig>();
@@ -676,7 +680,31 @@ autoDiscoveryMap.set(ScryptedInterface.HumiditySensor, {
}
});
export function publishAutoDiscovery(mqttId: string, client: Client, device: MixinDeviceBase<any>, topic: string, autoDiscoveryPrefix = 'homeassistant') {
autoDiscoveryMap.set(ScryptedInterface.OnOff, {
component: 'switch',
create(mqttId, device, topic) {
return {
payload_on: 'true',
payload_off: 'false',
state_topic: `${topic}/${ScryptedInterfaceProperty.on}`,
command_topic: `${topic}/${ScryptedInterfaceProperty.on}/set`,
...getAutoDiscoveryDevice(device, mqttId),
}
},
subscriptions: {
'on/set': (device, event) => {
const d = sdk.systemManager.getDeviceById<OnOff>(device.id);
if (event.json)
d.turnOn();
else
d.turnOff();
}
},
});
export function publishAutoDiscovery(mqttId: string, client: Client, device: MixinDeviceBase<any>, topic: string, subscribe: boolean, autoDiscoveryPrefix = 'homeassistant') {
const subs = new Set<string>();
for (const iface of device.interfaces) {
const found = autoDiscoveryMap.get(iface);
if (!found)
@@ -691,5 +719,38 @@ export function publishAutoDiscovery(mqttId: string, client: Client, device: Mix
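// the config payload is published retained so Home Assistant
// receives it when it (re)connects and subscribes.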
client.publish(configTopic, JSON.stringify(config), {
retain: true,
});
if (subscribe) {
const subscriptions = found.subscriptions || {};
for (const subscriptionTopic of Object.keys(subscriptions || {})) {
subs.add(subscriptionTopic);
const fullTopic = topic + '/' + subscriptionTopic;
const cb = subscriptions[subscriptionTopic];
client.subscribe(fullTopic)
client.on('message', (messageTopic, message) => {
if (fullTopic !== messageTopic && fullTopic !== '/' + messageTopic)
return;
device.console.log('mqtt message', subscriptionTopic, message.toString());
cb(device, {
get text() {
return message.toString();
},
get json() {
try {
return JSON.parse(message.toString());
}
catch (e) {
}
},
get buffer() {
return message;
}
})
});
}
}
}
return subs;
}

View File

@@ -294,13 +294,15 @@ class MqttPublisherMixin extends SettingsMixinDeviceBase<any> {
allProperties.push(...properties);
}
let found: ReturnType<typeof publishAutoDiscovery>;
client.on('connect', packet => {
this.console.log('MQTT client connected, publishing current state.');
for (const method of allMethods) {
client.subscribe(this.pathname + '/' + method);
}
publishAutoDiscovery(this.provider.storageSettings.values.mqttId, client, this, this.pathname, 'homeassistant');
found = publishAutoDiscovery(this.provider.storageSettings.values.mqttId, client, this, this.pathname, true, 'homeassistant');
client.subscribe('homeassistant/status');
this.publishState(client);
});
@@ -311,14 +313,17 @@ class MqttPublisherMixin extends SettingsMixinDeviceBase<any> {
client.on('message', async (messageTopic, message) => {
if (messageTopic === 'homeassistant/status') {
publishAutoDiscovery(this.provider.storageSettings.values.mqttId, client, this, this.pathname, 'homeassistant');
publishAutoDiscovery(this.provider.storageSettings.values.mqttId, client, this, this.pathname, false, 'homeassistant');
this.publishState(client);
return;
}
const method = messageTopic.substring(this.pathname.length + 1);
if (!allMethods.includes(method)) {
if (!allProperties.includes(method))
this.console.warn('unknown topic', method);
if (!allProperties.includes(method)) {
if (!found?.has(method)) {
this.console.warn('unknown topic', method);
}
}
return;
}
try {
@@ -592,7 +597,7 @@ export class MqttProvider extends ScryptedDeviceBase implements DeviceProvider,
return isPublishable(type, interfaces) ? [ScryptedInterface.Settings] : undefined;
}
async getMixin(mixinDevice: any, mixinDeviceInterfaces: ScryptedInterface[], mixinDeviceState:WritableDeviceState): Promise<any> {
async getMixin(mixinDevice: any, mixinDeviceInterfaces: ScryptedInterface[], mixinDeviceState: WritableDeviceState): Promise<any> {
return new MqttPublisherMixin(this, {
mixinDevice,
mixinDeviceState,

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/objectdetector",
"version": "0.1.33",
"version": "0.1.39",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/objectdetector",
"version": "0.1.33",
"version": "0.1.39",
"license": "Apache-2.0",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/objectdetector",
"version": "0.1.33",
"version": "0.1.39",
"description": "Scrypted Video Analysis Plugin. Installed alongside a detection service like OpenCV or TensorFlow.",
"author": "Scrypted",
"license": "Apache-2.0",

View File

@@ -0,0 +1,75 @@
// visual similarity
const similarCharacters = [
['0', 'O', 'D'],
['1', 'I'],
['2', 'Z'],
['4', 'A'],
['5', 'S'],
['8', 'B'],
['6', 'G'],
// not sure about this one.
['A', '4'],
['C', 'G'],
['E', 'F'],
];
const similarCharactersMap = new Map<string, Set<string>>();
for (const similarCharacter of similarCharacters) {
for (const character of similarCharacter) {
if (!similarCharactersMap.has(character)) {
similarCharactersMap.set(character, new Set());
}
for (const similar of similarCharacter) {
similarCharactersMap.get(character)!.add(similar);
}
}
}
function isSameCharacter(c1: string, c2: string) {
if (c1 === c2)
return true;
return similarCharactersMap.get(c1)?.has(c2);
}
export function levenshteinDistance(str1: string, str2: string): number {
// todo: handle lower/uppercase similarity in similarCharacters above.
// i.e., b is visually similar to 6, but does not really look like B.
// others include e and C; v, u, and Y; l, i, and 1.
str1 = str1.toUpperCase();
str2 = str2.toUpperCase();
const len1 = str1.length;
const len2 = str2.length;
// If either string is empty, the distance is the length of the other string
if (len1 === 0) return len2;
if (len2 === 0) return len1;
let prev: number[] = new Array(len2 + 1);
let curr: number[] = new Array(len2 + 1);
// Initialize the first row of the matrix to be the index of the second string
for (let i = 0; i <= len2; i++) {
prev[i] = i;
}
for (let i = 1; i <= len1; i++) {
// Initialize the current row with the distance from the previous row's first element
curr[0] = i;
for (let j = 1; j <= len2; j++) {
let cost = isSameCharacter(str1.charAt(i - 1), str2.charAt(j - 1)) ? 0 : 1;
// Compute the minimum of three possible operations: insertion, deletion, or substitution
curr[j] = Math.min(prev[j] + 1, curr[j - 1] + 1, prev[j - 1] + cost);
}
// Swap the previous and current rows for the next iteration
const temp = prev;
prev = curr;
curr = temp;
}
return prev[len2];
}
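// a worked sketch with hypothetical plates: with the table above,
// levenshteinDistance('B0SS123', 'BOSS123') === 0 because '0' and 'O'
// count as the same character, while levenshteinDistance('BOSS123', 'BOSS12')
// === 1 (one deletion), so a labelDistance of 1 would match both.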

View File

@@ -162,7 +162,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
getCurrentSettings() {
const settings = this.model.settings;
if (!settings)
return;
return { id : this.id };
const ret: { [key: string]: any } = {};
for (const setting of settings) {
@@ -183,7 +183,10 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
if (this.hasMotionType)
ret['motionAsObjects'] = true;
return ret;
return {
...ret,
id: this.id,
};
}
maybeStartDetection() {
@@ -910,9 +913,10 @@ class ObjectDetectorMixin extends MixinDeviceBase<ObjectDetection> implements Mi
let objectDetection = systemManager.getDeviceById<ObjectDetection>(this.id);
const hasMotionType = this.model.classes.includes('motion');
const group = hasMotionType ? 'Motion Detection' : 'Object Detection';
const model = await objectDetection.getDetectionModel({ id: mixinDeviceState.id });
// const group = objectDetection.name.replace('Plugin', '').trim();
const ret = new ObjectDetectionMixin(this.plugin, mixinDevice, mixinDeviceInterfaces, mixinDeviceState, this.mixinProviderNativeId, objectDetection, this.model, group, hasMotionType);
const ret = new ObjectDetectionMixin(this.plugin, mixinDevice, mixinDeviceInterfaces, mixinDeviceState, this.mixinProviderNativeId, objectDetection, model, group, hasMotionType);
this.currentMixins.add(ret);
return ret;
}

View File

@@ -1,6 +1,7 @@
import sdk, { Camera, EventListenerRegister, MediaObject, MotionSensor, ObjectDetector, ObjectsDetected, Readme, RequestPictureOptions, ResponsePictureOptions, ScryptedDevice, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, ScryptedNativeId, Setting, SettingValue, Settings } from "@scrypted/sdk";
import { StorageSetting, StorageSettings } from "@scrypted/sdk/storage-settings";
import type { ObjectDetectionPlugin } from "./main";
import { levenshteinDistance } from "./edit-distance";
export const SMART_MOTIONSENSOR_PREFIX = 'smart-motionsensor-';
@@ -44,7 +45,7 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
defaultValue: 0.7,
},
requireDetectionThumbnail: {
title: 'Rquire Detections with Images',
title: 'Require Detections with Images',
description: 'When enabled, this sensor will ignore detection results that do not have images.',
type: 'boolean',
defaultValue: false,
@@ -55,6 +56,21 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
type: 'boolean',
defaultValue: false,
},
labels: {
group: 'Recognition',
title: 'Labels',
description: 'The labels (license numbers, names) that will trigger this smart motion sensor.',
multiple: true,
combobox: true,
choices: [],
},
labelDistance: {
group: 'Recognition',
title: 'Label Distance',
description: 'The maximum edit distance between the detected label and the desired label. I.e., a distance of 1 will match "abcde" to "abcbe" or "abcd".',
type: 'number',
defaultValue: 2,
},
});
listener: EventListenerRegister;
@@ -157,6 +173,8 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
if (this.storageSettings.values.requireDetectionThumbnail && !detected.detectionId)
return false;
const { labels, labelDistance } = this.storageSettings.values;
const match = detected.detections?.find(d => {
if (this.storageSettings.values.requireScryptedNvrDetections && !d.boundingBox)
return false;
@@ -181,10 +199,27 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
this.console.warn('Camera does not provide Zones in detection event. Zone filter will not be applied.');
}
}
if (!d.movement)
return true;
return d.movement.moving;
})
// when not searching for a label, validate the object is moving.
if (!labels?.length)
return !d.movement || d.movement.moving;
if (!d.label)
return false;
for (const label of labels) {
if (label === d.label)
return true;
if (!labelDistance)
continue;
if (levenshteinDistance(label, d.label) <= labelDistance)
return true;
this.console.log('Label does not match.', label, d.label);
}
return false;
});
if (match) {
if (!this.motionDetected)
console.log('Smart Motion Sensor triggered on', match);

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/onvif",
"version": "0.1.10",
"version": "0.1.14",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/onvif",
"version": "0.1.10",
"version": "0.1.14",
"license": "Apache",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/onvif",
"version": "0.1.10",
"version": "0.1.14",
"description": "ONVIF Camera Plugin for Scrypted",
"author": "Scrypted",
"license": "Apache",

View File

@@ -97,7 +97,7 @@ export class OnvifIntercom implements Intercom {
this.camera.console.log('backchannel transport', transportDict);
const availableMatches = audioBackchannel.rtpmaps.filter(rtpmap => rtpmap.ffmpegEncoder);
const defaultMatch = audioBackchannel.rtpmaps.find(rtpmap => rtpmap.ffmpegEncoder);
const defaultMatch = audioBackchannel.rtpmaps.find(rtpmap => rtpmap.ffmpegEncoder === 'pcm_mulaw') || audioBackchannel.rtpmaps.find(rtpmap => rtpmap.ffmpegEncoder);
if (!defaultMatch)
throw new Error('no supported codec was found for back channel');
@@ -151,7 +151,7 @@ export class OnvifIntercom implements Intercom {
}
const elapsedRtpTimeMs = Math.abs(pending.header.timestamp - p.header.timestamp) / 8000 * 1000;
if (elapsedRtpTimeMs <= 60) {
if (elapsedRtpTimeMs <= 160 && pending.payload.length + p.payload.length <= 1024) {
pending.payload = Buffer.concat([pending.payload, p.payload]);
return;
}

View File

@@ -11,7 +11,7 @@
// local checkout
"scrypted.debugHost": "127.0.0.1",
"scrypted.serverRoot": "/Users/koush/.scrypted",
// "scrypted.debugHost": "koushik-windows",
// "scrypted.debugHost": "koushik-winvm",
// "scrypted.serverRoot": "C:\\Users\\koush\\.scrypted",
"scrypted.pythonRemoteRoot": "${config:scrypted.serverRoot}/volume/plugin.zip",

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/openvino",
"version": "0.1.54",
"version": "0.1.77",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/openvino",
"version": "0.1.54",
"version": "0.1.77",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -33,6 +33,7 @@
"runtime": "python",
"type": "API",
"interfaces": [
"DeviceProvider",
"Settings",
"ObjectDetection",
"ObjectDetectionPreview"
@@ -41,5 +42,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.54"
"version": "0.1.77"
}

View File

@@ -0,0 +1,36 @@
import concurrent.futures
from PIL import Image
import asyncio
from typing import Tuple
# vips is already multithreaded, but conversions must be run off the python asyncio thread so they do not block it.
toThreadExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="image")
async def to_thread(f):
loop = asyncio.get_running_loop()
return await loop.run_in_executor(toThreadExecutor, f)
async def ensureRGBData(data: bytes, size: Tuple[int, int], format: str):
if format == 'rgb':
return Image.frombuffer('RGB', size, data)
def convert():
rgba = Image.frombuffer('RGBA', size, data)
try:
return rgba.convert('RGB')
finally:
rgba.close()
return await to_thread(convert)
async def ensureRGBAData(data: bytes, size: Tuple[int, int], format: str):
if format == 'rgba':
return Image.frombuffer('RGBA', size, data)
# this path should never be possible as all the image sources should be capable of rgba.
def convert():
rgb = Image.frombuffer('RGB', size, data)
try:
return rgb.convert('RGBA')
finally:
rgb.close()
return await to_thread(convert)

View File

@@ -0,0 +1,44 @@
import numpy as np
def softmax(X, theta = 1.0, axis = None):
"""
Compute the softmax of each element along an axis of X.
Parameters
----------
X: ND-Array. Probably should be floats.
theta (optional): float parameter, used as a multiplier
prior to exponentiation. Default = 1.0
axis (optional): axis to compute values along. Default is the
first non-singleton axis.
Returns an array the same size as X. The result will sum to 1
along the specified axis.
"""
# make X at least 2d
y = np.atleast_2d(X)
# find axis
if axis is None:
axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
# multiply y against the theta parameter,
y = y * float(theta)
# subtract the max for numerical stability
y = y - np.expand_dims(np.max(y, axis = axis), axis)
# exponentiate y
y = np.exp(y)
# take the sum along the specified axis
ax_sum = np.expand_dims(np.sum(y, axis = axis), axis)
# finally: divide elementwise
p = y / ax_sum
# flatten if X was 1D
if len(X.shape) == 1: p = p.flatten()
return p
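A quick usage sketch of the helper above, with a hypothetical tensor shape; process_text_result calls it with axis=2 so each timestep's class scores normalize to 1:
import numpy as np
preds = np.random.rand(1, 5, 97)    # hypothetical (batch, seq_len, num_classes)
probs = softmax(preds, axis=2)      # normalize class scores per timestep
assert np.allclose(probs.sum(axis=2), 1.0)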

View File

@@ -0,0 +1,98 @@
from PIL import Image, ImageOps
from scrypted_sdk import (
ObjectDetectionResult,
)
import scrypted_sdk
import numpy as np
from common.softmax import softmax
from common.colors import ensureRGBData
import math
async def crop_text(d: ObjectDetectionResult, image: scrypted_sdk.Image, width: int, height: int):
l, t, w, h = d["boundingBox"]
l = math.floor(l)
t = math.floor(t)
w = math.floor(w)
h = math.floor(h)
format = image.format or 'rgb'
cropped = await image.toBuffer(
{
"crop": {
"left": l,
"top": t,
"width": w,
"height": h,
},
"format": format,
}
)
pilImage = await ensureRGBData(cropped, (w, h), format)
resized = pilImage.resize((width, height), resample=Image.LANCZOS).convert("L")
pilImage.close()
return resized
async def prepare_text_result(d: ObjectDetectionResult, image: scrypted_sdk.Image):
new_height = 64
new_width = int(d["boundingBox"][2] * new_height / d["boundingBox"][3])
textImage = await crop_text(d, image, new_width, new_height)
new_width = 256
# calculate padding dimensions
padding = (0, 0, new_width - textImage.width, 0)
# todo: clamp entire edge rather than just center
edge_color = textImage.getpixel((textImage.width - 1, textImage.height // 2))
# pad image
textImage = ImageOps.expand(textImage, padding, fill=edge_color)
# pil to numpy
image_array = np.array(textImage)
image_array = image_array.reshape(textImage.height, textImage.width, 1)
image_tensor = image_array.transpose((2, 0, 1)) / 255
# test normalize contrast
# image_tensor = (image_tensor - np.min(image_tensor)) / (np.max(image_tensor) - np.min(image_tensor))
image_tensor = (image_tensor - 0.5) / 0.5
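# shift the [0, 1] grayscale values into the [-1, 1] range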
image_tensor = np.expand_dims(image_tensor, axis=0)
return image_tensor
characters = "0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ €ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
dict_character = list(characters)
character = ["[blank]"] + dict_character # dummy '[blank]' token for CTCLoss (index 0)
def decode_greedy(text_index, length):
"""convert text-index into text-label."""
texts = []
index = 0
for l in length:
t = text_index[index : index + l]
# Returns a boolean array where true is when the value is not repeated
a = np.insert(~((t[1:] == t[:-1])), 0, True)
# Returns a boolean array where true is when the value is not in the ignored index set (empty here)
b = ~np.isin(t, np.array(""))
# Combine the two boolean array
c = a & b
# Gets the corresponding character according to the saved indexes
text = "".join(np.array(character)[t[c.nonzero()]])
texts.append(text)
index += l
return texts
def process_text_result(preds):
preds_size = preds.shape[1]
# softmax preds using scipy
preds_prob = softmax(preds, axis=2)
# preds_prob = softmax(preds)
pred_norm = np.sum(preds_prob, axis=2)
preds_prob = preds_prob / np.expand_dims(pred_norm, axis=-1)
preds_index = np.argmax(preds_prob, axis=2)
preds_index = preds_index.reshape(-1)
preds_str = decode_greedy(preds_index, np.array([preds_size]))
# why index 0? are there multiple predictions?
return preds_str[0].replace('[blank]', '')
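A toy walk-through of decode_greedy above: repeated indices collapse to one, and index 0 decodes to the '[blank]' token that process_text_result strips afterward. With the character table defined above, character[1] is '0' and character[2] is '1':
import numpy as np
decoded = decode_greedy(np.array([1, 1, 0, 2, 2, 2]), [6])
print(decoded)  # ['0[blank]1'], which becomes '01' after the '[blank]' replace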

View File

@@ -1,12 +1,12 @@
import sys
from math import exp
import numpy as np
from predict import Prediction, Rectangle
from predict import Prediction
from predict.rectangle import Rectangle
defaultThreshold = .2
def parse_yolov8(results, threshold = defaultThreshold, scale = None, confidence_scale = None):
def parse_yolov9(results, threshold = defaultThreshold, scale = None, confidence_scale = None):
objs = []
keep = np.argwhere(results[4:] > threshold)
for indices in keep:
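A rough sketch of what that thresholding does, assuming the conventional YOLOv8/v9 output layout of 4 box coordinates plus per-class scores per anchor column (the shape below is hypothetical):
import numpy as np
results = np.random.rand(84, 8400)      # hypothetical: 4 box coords + 80 class scores
keep = np.argwhere(results[4:] > 0.2)   # rows of (class_index, anchor_index)
for class_index, anchor in keep:
    score = results[4 + class_index, anchor]
    cx, cy, w, h = results[:4, anchor]  # box center and size for that anchor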

View File

@@ -1,52 +1,85 @@
from __future__ import annotations
import asyncio
import json
import re
from typing import Any, Tuple
import numpy as np
import openvino.runtime as ov
import scrypted_sdk
from PIL import Image
from scrypted_sdk.other import SettingValue
from scrypted_sdk.types import Setting
import concurrent.futures
from predict import PredictPlugin, Prediction, Rectangle
import numpy as np
import yolo
import common.yolo as yolo
from predict import Prediction, PredictPlugin
from predict.rectangle import Rectangle
from .recognition import OpenVINORecognition
predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "OpenVINO-Predict")
availableModels = [
"Default",
"scrypted_yolov6n_320",
"scrypted_yolov6n",
"scrypted_yolov6s_320",
"scrypted_yolov6s",
"scrypted_yolov9c_320",
"scrypted_yolov9c",
"scrypted_yolov8n_320",
"scrypted_yolov8n",
"ssd_mobilenet_v1_coco",
"ssdlite_mobilenet_v2",
"yolo-v3-tiny-tf",
"yolo-v4-tiny-tf",
]
def parse_label_contents(contents: str):
lines = contents.splitlines()
lines = [line for line in lines if line.strip()]
ret = {}
for row_number, content in enumerate(lines):
pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
pair = re.split(r"[:\s]+", content.strip(), maxsplit=1)
if len(pair) == 2 and pair[0].strip().isdigit():
ret[int(pair[0])] = pair[1].strip()
else:
ret[row_number] = content.strip()
return ret
def param_to_string(parameters) -> str:
"""Convert a list / tuple of parameters returned from IE to a string."""
if isinstance(parameters, (list, tuple)):
return ', '.join([str(x) for x in parameters])
return ", ".join([str(x) for x in parameters])
else:
return str(parameters)
def dump_device_properties(core):
print('Available devices:')
print("Available devices:")
for device in core.available_devices:
print(f'{device} :')
print('\tSUPPORTED_PROPERTIES:')
for property_key in core.get_property(device, 'SUPPORTED_PROPERTIES'):
if property_key not in ('SUPPORTED_METRICS', 'SUPPORTED_CONFIG_KEYS', 'SUPPORTED_PROPERTIES'):
print(f"{device} :")
print("\tSUPPORTED_PROPERTIES:")
for property_key in core.get_property(device, "SUPPORTED_PROPERTIES"):
if property_key not in (
"SUPPORTED_METRICS",
"SUPPORTED_CONFIG_KEYS",
"SUPPORTED_PROPERTIES",
):
try:
property_val = core.get_property(device, property_key)
except TypeError:
property_val = 'UNSUPPORTED TYPE'
print(f'\t\t{property_key}: {param_to_string(property_val)}')
print('')
property_val = "UNSUPPORTED TYPE"
print(f"\t\t{property_key}: {param_to_string(property_val)}")
print("")
class OpenVINOPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings):
class OpenVINOPlugin(
PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings, scrypted_sdk.DeviceProvider
):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
@@ -54,70 +87,99 @@ class OpenVINOPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.S
dump_device_properties(self.core)
available_devices = self.core.available_devices
self.available_devices = available_devices
print('available devices: %s' % available_devices)
print("available devices: %s" % available_devices)
mode = self.storage.getItem('mode')
if mode == 'Default':
mode = 'AUTO'
mode = mode or 'AUTO'
mode = self.storage.getItem("mode")
if mode == "Default":
mode = "AUTO"
mode = mode or "AUTO"
self.mode = mode
precision = self.storage.getItem('precision') or 'Default'
if precision == 'Default':
precision = self.storage.getItem("precision") or "Default"
if precision == "Default":
using_mode = mode
if using_mode == 'AUTO':
if 'GPU' in available_devices:
using_mode = 'GPU'
if using_mode == 'GPU':
precision = 'FP16'
if using_mode == "AUTO":
if "GPU" in available_devices:
using_mode = "GPU"
if using_mode == "GPU":
precision = "FP16"
else:
precision = 'FP32'
precision = "FP32"
model = self.storage.getItem('model') or 'Default'
if model == 'Default':
model = 'yolov8n_320'
self.yolo = 'yolo' in model
self.yolov8 = "yolov8" in model
self.yolov9 = "yolov9" in model
self.sigmoid = model == 'yolo-v4-tiny-tf'
self.precision = precision
print(f'model/mode/precision: {model}/{mode}/{precision}')
model = self.storage.getItem("model") or "Default"
if model == "Default" or model not in availableModels:
if model != "Default":
self.storage.setItem("model", "Default")
model = "scrypted_yolov8n_320"
self.yolo = "yolo" in model
self.scrypted_yolo = "scrypted_yolo" in model
self.scrypted_model = "scrypted" in model
self.sigmoid = model == "yolo-v4-tiny-tf"
model_version = 'v4'
xmlFile = self.downloadFile(f'https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{model}.xml', f'{model_version}/{precision}/{model}.xml')
binFile = self.downloadFile(f'https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{model}.bin', f'{model_version}/{precision}/{model}.bin')
if self.yolo:
labelsFile = self.downloadFile('https://raw.githubusercontent.com/koush/openvino-models/main/coco_80cl.txt', 'coco_80cl.txt')
print(f"model/mode/precision: {model}/{mode}/{precision}")
ovmodel = "best" if self.scrypted_model else model
model_version = "v5"
xmlFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{ovmodel}.xml",
f"{model_version}/{model}/{precision}/{ovmodel}.xml",
)
binFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{ovmodel}.bin",
f"{model_version}/{model}/{precision}/{ovmodel}.bin",
)
if self.scrypted_model:
labelsFile = self.downloadFile(
"https://raw.githubusercontent.com/koush/openvino-models/main/scrypted_labels.txt",
"scrypted_labels.txt",
)
elif self.yolo:
labelsFile = self.downloadFile(
"https://raw.githubusercontent.com/koush/openvino-models/main/coco_80cl.txt",
"coco_80cl.txt",
)
else:
labelsFile = self.downloadFile('https://raw.githubusercontent.com/koush/openvino-models/main/coco_labels.txt', 'coco_labels.txt')
labelsFile = self.downloadFile(
"https://raw.githubusercontent.com/koush/openvino-models/main/coco_labels.txt",
"coco_labels.txt",
)
print(xmlFile, binFile, labelsFile)
try:
self.compiled_model = self.core.compile_model(xmlFile, mode)
print("EXECUTION_DEVICES", self.compiled_model.get_property("EXECUTION_DEVICES"))
print(
"EXECUTION_DEVICES",
self.compiled_model.get_property("EXECUTION_DEVICES"),
)
except:
import traceback
traceback.print_exc()
print("Reverting all settings.")
self.storage.removeItem('mode')
self.storage.removeItem('model')
self.storage.removeItem('precision')
self.storage.removeItem("mode")
self.storage.removeItem("model")
self.storage.removeItem("precision")
self.requestRestart()
# mobilenet 1,300,300,3
# yolov3/4 1,416,416,3
# yolov8 1,3,320,320
# yolov9 1,3,320,320
# index 2 of the input shape is a spatial dimension in both the NHWC and NCHW layouts above.
self.model_dim = self.compiled_model.inputs[0].shape[2]
labels_contents = open(labelsFile, 'r').read()
labels_contents = open(labelsFile, "r").read()
self.labels = parse_label_contents(labels_contents)
asyncio.ensure_future(self.prepareRecognitionModels(), loop=self.loop)
async def getSettings(self) -> list[Setting]:
mode = self.storage.getItem('mode') or 'Default'
model = self.storage.getItem('model') or 'Default'
precision = self.storage.getItem('precision') or 'Default'
mode = self.storage.getItem("mode") or "Default"
model = self.storage.getItem("model") or "Default"
precision = self.storage.getItem("precision") or "Default"
return [
{
"title": "Available Devices",
@@ -127,47 +189,38 @@ class OpenVINOPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.S
"key": "available_devices",
},
{
'key': 'model',
'title': 'Model',
'description': 'The detection model used to find objects.',
'choices': [
'Default',
'ssd_mobilenet_v1_coco',
'ssdlite_mobilenet_v2',
'yolo-v3-tiny-tf',
'yolo-v4-tiny-tf',
'yolov8n',
'yolov8n_320',
'yolov9c_320',
],
'value': model,
"key": "model",
"title": "Model",
"description": "The detection model used to find objects.",
"choices": availableModels,
"value": model,
},
{
'key': 'mode',
'title': 'Mode',
'description': 'AUTO, CPU, or GPU mode to use for detections. Requires plugin reload. Use CPU if the system has unreliable GPU drivers.',
'choices': [
'Default',
'AUTO',
'CPU',
'GPU',
"key": "mode",
"title": "Mode",
"description": "AUTO, CPU, or GPU mode to use for detections. Requires plugin reload. Use CPU if the system has unreliable GPU drivers.",
"choices": [
"Default",
"AUTO",
"CPU",
"GPU",
],
'value': mode,
'combobox': True,
"value": mode,
"combobox": True,
},
{
'key': 'precision',
'title': 'Precision',
'description': 'The model floating point precision. FP16 is recommended for GPU. FP32 is recommended for CPU.',
'choices': [
'Default',
'FP16',
'FP32',
"key": "precision",
"title": "Precision",
"description": "The model floating point precision. FP16 is recommended for GPU. FP32 is recommended for CPU.",
"choices": [
"Default",
"FP16",
"FP32",
],
'value': precision,
}
"value": precision,
},
]
async def putSetting(self, key: str, value: SettingValue):
self.storage.setItem(key, value)
await self.onDeviceEvent(scrypted_sdk.ScryptedInterface.Settings.value, None)
@@ -181,30 +234,15 @@ class OpenVINOPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.S
return [self.model_dim, self.model_dim]
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
async def predict():
def predict(input_tensor):
infer_request = self.compiled_model.create_infer_request()
# the input_tensor can be created with the shared_memory=True parameter,
# but that seems to cause issues on some platforms.
if self.yolov8 or self.yolov9:
im = np.stack([input])
im = im.transpose((0, 3, 1, 2)) # BHWC to BCHW, (n, 3, h, w)
im = im.astype(np.float32) / 255.0
im = np.ascontiguousarray(im) # contiguous
im = ov.Tensor(array=im)
input_tensor = im
elif self.yolo:
input_tensor = ov.Tensor(array=np.expand_dims(np.array(input), axis=0).astype(np.float32))
else:
input_tensor = ov.Tensor(array=np.expand_dims(np.array(input), axis=0))
# Set input tensor for model with one input
infer_request.set_input_tensor(input_tensor)
infer_request.start_async()
infer_request.wait()
output_tensors = infer_request.infer()
objs = []
if self.yolov8 or self.yolov9:
objs = yolo.parse_yolov8(infer_request.output_tensors[0].data[0])
if self.scrypted_yolo:
objs = yolo.parse_yolov9(output_tensors[0][0])
return objs
if self.yolo:
@@ -214,17 +252,21 @@ class OpenVINOPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.S
out_blob = infer_request.outputs[0]
else:
out_blob = infer_request.outputs[1]
# 13 13
objects = yolo.parse_yolo_region(out_blob.data, (input.width, input.height),(81,82, 135,169, 344,319), self.sigmoid)
objects = yolo.parse_yolo_region(
out_blob.data,
(input.width, input.height),
(81, 82, 135, 169, 344, 319),
self.sigmoid,
)
for r in objects:
obj = Prediction(r['classId'], r['confidence'], Rectangle(
r['xmin'],
r['ymin'],
r['xmax'],
r['ymax']
))
obj = Prediction(
r["classId"],
r["confidence"],
Rectangle(r["xmin"], r["ymin"], r["xmax"], r["ymax"]),
)
objs.append(obj)
# what about output[1]?
@@ -233,7 +275,6 @@ class OpenVINOPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.S
return objs
output = infer_request.get_output_tensor(0)
for values in output.data[0][0].astype(float):
valid, index, confidence, l, t, r, b = values
@@ -248,22 +289,59 @@ class OpenVINOPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.S
r = torelative(r)
b = torelative(b)
obj = Prediction(index - 1, confidence, Rectangle(
l,
t,
r,
b
))
obj = Prediction(index - 1, confidence, Rectangle(l, t, r, b))
objs.append(obj)
return objs
# the input_tensor can be created with the shared_memory=True parameter,
# but that seems to cause issues on some platforms.
if self.scrypted_yolo:
im = np.stack([input])
im = im.transpose((0, 3, 1, 2)) # BHWC to BCHW, (n, 3, h, w)
im = im.astype(np.float32) / 255.0
im = np.ascontiguousarray(im) # contiguous
im = ov.Tensor(array=im)
input_tensor = im
elif self.yolo:
input_tensor = ov.Tensor(
array=np.expand_dims(np.array(input), axis=0).astype(np.float32)
)
else:
input_tensor = ov.Tensor(array=np.expand_dims(np.array(input), axis=0))
try:
objs = await predict()
objs = await asyncio.get_event_loop().run_in_executor(
predictExecutor, lambda: predict(input_tensor)
)
except:
import traceback
traceback.print_exc()
raise
ret = self.create_detection_result(objs, src_size, cvss)
return ret
async def prepareRecognitionModels(self):
try:
await scrypted_sdk.deviceManager.onDevicesChanged(
{
"devices": [
{
"nativeId": "recognition",
"type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
"interfaces": [
scrypted_sdk.ScryptedInterface.ObjectDetection.value,
],
"name": "OpenVINO Recognition",
}
]
}
)
except:
pass
async def getDevice(self, nativeId: str) -> Any:
return OpenVINORecognition(self, nativeId)

View File

@@ -0,0 +1,71 @@
from __future__ import annotations
import concurrent.futures
import openvino.runtime as ov
import numpy as np
from predict.recognize import RecognizeDetection
def euclidean_distance(arr1, arr2):
return np.linalg.norm(arr1 - arr2)
def cosine_similarity(vector_a, vector_b):
dot_product = np.dot(vector_a, vector_b)
norm_a = np.linalg.norm(vector_a)
norm_b = np.linalg.norm(vector_b)
similarity = dot_product / (norm_a * norm_b)
return similarity
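# For intuition: cosine similarity is the dot product divided by the product of the norms,
# ranging from -1 (opposite) through 0 (orthogonal) to 1 (same direction), so embeddings of
# the same face should score near 1. A quick worked check with the helpers defined above:

a = np.array([1.0, 0.0])
b = np.array([1.0, 1.0])
# dot = 1, |a| = 1, |b| = sqrt(2) -> 1 / sqrt(2) ~= 0.707
print(cosine_similarity(a, b))   # 0.7071...
print(cosine_similarity(a, a))   # 1.0
print(euclidean_distance(a, b))  # 1.0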
class OpenVINORecognition(RecognizeDetection):
def __init__(self, plugin, nativeId: str | None = None):
self.plugin = plugin
super().__init__(nativeId=nativeId)
def downloadModel(self, model: str):
ovmodel = "best"
precision = self.plugin.precision
model_version = "v5"
xmlFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{ovmodel}.xml",
f"{model_version}/{model}/{precision}/{ovmodel}.xml",
)
binFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{ovmodel}.bin",
f"{model_version}/{model}/{precision}/{ovmodel}.bin",
)
print(xmlFile, binFile)
return self.plugin.core.compile_model(xmlFile, self.plugin.mode)
def predictDetectModel(self, input):
infer_request = self.detectModel.create_infer_request()
im = np.stack([input])
im = im.transpose((0, 3, 1, 2)) # BHWC to BCHW, (n, 3, h, w)
im = im.astype(np.float32) / 255.0
im = np.ascontiguousarray(im) # contiguous
im = ov.Tensor(array=im)
input_tensor = im
infer_request.set_input_tensor(input_tensor)
infer_request.start_async()
infer_request.wait()
return infer_request.output_tensors[0].data[0]
def predictFaceModel(self, input):
im = ov.Tensor(array=input)
infer_request = self.faceModel.create_infer_request()
infer_request.set_input_tensor(im)
infer_request.start_async()
infer_request.wait()
return infer_request.output_tensors[0].data[0]
def predictTextModel(self, input):
input = input.astype(np.float32)
im = ov.Tensor(array=input)
infer_request = self.textModel.create_infer_request()
infer_request.set_input_tensor(im)
infer_request.start_async()
infer_request.wait()
return infer_request.output_tensors[0].data

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/python-codecs",
"version": "0.1.95",
"version": "0.1.96",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/python-codecs",
"version": "0.1.95",
"version": "0.1.96",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/python-codecs",
"version": "0.1.95",
"version": "0.1.96",
"description": "Python Codecs for Scrypted",
"keywords": [
"scrypted",

View File

@@ -1,6 +1,7 @@
import asyncio
import time
import traceback
import os
from typing import Any, AsyncGenerator, List, Union
import scrypted_sdk
@@ -202,7 +203,7 @@ def multiprocess_exit():
class CodecFork:
def timeoutExit():
def timeoutExit(self):
print("Frame yield timed out, exiting pipeline.")
multiprocess_exit()

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/reolink",
"version": "0.0.63",
"version": "0.0.66",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/reolink",
"version": "0.0.63",
"version": "0.0.66",
"license": "Apache",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/reolink",
"version": "0.0.63",
"version": "0.0.66",
"description": "Reolink Plugin for Scrypted",
"author": "Scrypted",
"license": "Apache",

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/snapshot",
"version": "0.2.40",
"version": "0.2.50",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/snapshot",
"version": "0.2.40",
"version": "0.2.50",
"dependencies": {
"@types/node": "^20.10.6",
"sharp": "^0.33.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/snapshot",
"version": "0.2.40",
"version": "0.2.50",
"description": "Snapshot Plugin for Scrypted",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",

View File

@@ -19,6 +19,6 @@ export class ImageConverter extends ScryptedDeviceBase implements BufferConverte
const op = parseImageOp(mime.parameters);
const ffmpegInput = JSON.parse(data.toString()) as FFmpegInput;
return processImageOp(ffmpegInput, op, parseFloat(mime.parameters.get('time')), options?.sourceId, this.plugin.debugConsole);
return processImageOp(ffmpegInput, op, parseFloat(mime.parameters.get('time')), options?.sourceId, !!this.plugin.debugConsole);
}
}

View File

@@ -1,5 +1,6 @@
import sdk, { BufferConverter, Image, ImageOptions, MediaObject, MediaObjectOptions, ScryptedDeviceBase, ScryptedMimeTypes } from "@scrypted/sdk";
import type sharp from 'sharp';
import type { KernelEnum } from "sharp";
let hasLoadedSharp = false;
let sharpInstance: typeof sharp;
@@ -8,8 +9,6 @@ export function loadSharp() {
hasLoadedSharp = true;
try {
sharpInstance = require('sharp');
// not exposed by sharp but it exists.
(sharpInstance.kernel as any).linear = 'linear';
console.log('sharp loaded');
}
catch (e) {
@@ -60,11 +59,8 @@ export class VipsImage implements Image {
});
}
if (options?.resize) {
let kernel: string;
let kernel: keyof KernelEnum;
switch (options?.resize.filter) {
case 'bilinear':
kernel = 'linear';
break;
case 'lanczos':
kernel = 'lanczos2';
break;
@@ -75,12 +71,13 @@ export class VipsImage implements Image {
kernel = 'nearest';
break;
default:
kernel = 'linear';
kernel = 'cubic';
break
}
transformed.resize(typeof options.resize.width === 'number' ? Math.floor(options.resize.width) : undefined, typeof options.resize.height === 'number' ? Math.floor(options.resize.height) : undefined, {
fit: "cover",
kernel: kernel as any,
// haven't decided if these are the correct defaults.
fit: options.resize.width && options.resize.height ? 'fill' : 'cover',
kernel: kernel,
});
}
@@ -132,13 +129,21 @@ export class VipsImage implements Image {
}
}
export async function loadVipsImage(data: Buffer | string, sourceId: string) {
loadSharp();
export async function loadVipsMetadata(data: Buffer | string) {
const image = sharpInstance(data, {
failOn: 'none'
});
const metadata = await image.metadata();
return {
image,
metadata,
}
}
export async function loadVipsImage(data: Buffer | string, sourceId: string) {
loadSharp();
const { image, metadata } = await loadVipsMetadata(data);
const vipsImage = new VipsImage(image, metadata, sourceId);
return vipsImage;
}

View File

@@ -103,7 +103,10 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {
type: 'clippath',
},
});
snapshotDebouncer = createMapPromiseDebouncer<Buffer>();
snapshotDebouncer = createMapPromiseDebouncer<{
picture: Buffer;
pictureTime: number;
}>();
errorPicture: RefreshPromise<Buffer>;
timeoutPicture: RefreshPromise<Buffer>;
progressPicture: RefreshPromise<Buffer>;
@@ -203,6 +206,7 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {
const takePicture = await preparePrebufferSnapshot()
if (!takePicture)
throw e;
this.console.error('Snapshot failed, falling back to prebuffer', e);
return takePicture();
}
@@ -267,123 +271,137 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {
}
async takePictureRaw(options?: RequestPictureOptions): Promise<Buffer> {
let picture: Buffer;
let rawPicturePromise: Promise<{
picture: Buffer;
pictureTime: number;
}>;
const eventSnapshot = options?.reason === 'event';
const periodicSnapshot = options?.reason === 'periodic';
try {
// clear out snapshots that are too old.
if (this.currentPictureTime < Date.now() - 1 * 60 * 60 * 1000)
this.currentPicture = undefined;
const allowedSnapshotStaleness = eventSnapshot ? 0 : periodicSnapshot ? 20000 : 10000;
let needRefresh = true;
if (this.currentPicture && this.currentPictureTime > Date.now() - allowedSnapshotStaleness) {
this.debugConsole?.log('Using cached snapshot for', options?.reason);
rawPicturePromise = Promise.resolve({
picture: this.currentPicture,
pictureTime: this.currentPictureTime,
});
needRefresh = this.currentPictureTime < Date.now() - allowedSnapshotStaleness / 2;
}
if (needRefresh) {
const debounced = this.snapshotDebouncer({
id: options?.id,
reason: options?.reason,
}, eventSnapshot ? 0 : 2000, async () => {
}, eventSnapshot ? 0 : 10000, async () => {
const snapshotTimer = Date.now();
let picture = await this.takePictureInternal();
picture = await this.cropAndScale(picture);
this.clearCachedPictures();
const pictureTime = Date.now();
this.currentPicture = picture;
this.currentPictureTime = Date.now();
this.currentPictureTime = pictureTime;
this.lastAvailablePicture = picture;
this.debugConsole?.debug(`Periodic snapshot took ${(this.currentPictureTime - snapshotTimer) / 1000} seconds to retrieve.`)
return picture;
return {
picture,
pictureTime,
};
});
debounced.catch(() => { });
// periodic snapshot should get the immediately available picture.
// the debounce has already triggered a refresh for the next go around.
if (periodicSnapshot && this.currentPicture) {
const cp = this.currentPicture;
debounced.catch(() => { });
const timeout = options.timeout || 1000;
try {
picture = await timeoutPromise(timeout, debounced);
}
catch (e) {
if (e instanceof TimeoutError)
this.debugConsole?.log(`Periodic snapshot took longer than ${timeout} seconds to retrieve, falling back to cached picture.`)
rawPicturePromise ||= debounced;
}
picture = cp;
}
}
else {
picture = await debounced;
}
// prevent this from expiring
let availablePicture = this.currentPicture;
let availablePictureTime = this.currentPictureTime;
let rawPicture: Awaited<typeof rawPicturePromise>;
try {
const pictureTimeout = options?.timeout || (periodicSnapshot && availablePicture ? 1000 : 10000) || 10000;
rawPicture = await timeoutPromise(pictureTimeout, rawPicturePromise);
}
catch (e) {
// use the fallback cached picture if it is somewhat recent.
if (this.currentPictureTime < Date.now() - 1 * 60 * 60 * 1000)
this.currentPicture = undefined;
// a best effort was made to get a recent snapshot from cache or from a camera request,
// the cache request will never fail, but if the camera request fails,
// it may be ok to use a somewhat stale snapshot depending on reason.
// event snapshot requests must not use cache since they're for realtime processing by homekit and nvr.
if (eventSnapshot)
throw e;
if (!this.currentPicture)
availablePicture = this.currentPicture || availablePicture;
if (!availablePicture)
return this.createErrorImage(e);
this.console.warn('Snapshot failed, but recovered from cache', e);
picture = this.currentPicture;
rawPicture = {
picture: availablePicture,
pictureTime: availablePictureTime,
};
// gc
availablePicture = undefined;
}
const needSoftwareResize = !!(options?.picture?.width || options?.picture?.height) && this.storageSettings.values.snapshotResolution !== 'Full Resolution';
if (needSoftwareResize) {
try {
picture = await this.snapshotDebouncer({
needSoftwareResize: true,
picture: options.picture,
}, eventSnapshot ? 0 : 2000, async () => {
this.debugConsole?.log("Resizing picture from camera", options?.picture);
if (loadSharp()) {
const vips = await loadVipsImage(picture, this.id);
try {
const ret = await vips.toBuffer({
resize: options?.picture,
format: 'jpg',
});
return ret;
}
finally {
vips.close();
}
if (!needSoftwareResize)
return rawPicture.picture;
try {
const key = {
pictureTime: rawPicture.pictureTime,
reason: options?.reason,
needSoftwareResize: true,
picture: options.picture,
};
const ret = await this.snapshotDebouncer(key, 10000, async () => {
this.debugConsole?.log("Resizing picture from camera", key);
if (loadSharp()) {
const vips = await loadVipsImage(rawPicture.picture, this.id);
try {
const ret = await vips.toBuffer({
resize: options?.picture,
format: 'jpg',
});
return {
picture: ret,
pictureTime: rawPicture.pictureTime,
};
}
finally {
vips.close();
}
}
// try {
// const mo = await mediaManager.createMediaObject(picture, 'image/jpeg', {
// sourceId: this.id,
// });
// const image = await mediaManager.convertMediaObject<Image>(mo, ScryptedMimeTypes.Image);
// let { width, height } = options.picture;
// if (!width)
// width = height / image.height * image.width;
// if (!height)
// height = width / image.width * image.height;
// return await image.toBuffer({
// resize: {
// width,
// height,
// },
// format: 'jpg',
// });
// }
// catch (e) {
// if (!e.message?.includes('no converter found'))
// throw e;
// }
return ffmpegFilterImageBuffer(picture, {
console: this.debugConsole,
ffmpegPath: await mediaManager.getFFmpegPath(),
resize: options?.picture,
timeout: 10000,
});
const ret = await ffmpegFilterImageBuffer(rawPicture.picture, {
console: this.debugConsole,
ffmpegPath: await mediaManager.getFFmpegPath(),
resize: options?.picture,
timeout: 10000,
});
}
catch (e) {
if (eventSnapshot)
throw e;
return this.createErrorImage(e);
}
return {
picture: ret,
pictureTime: rawPicture.pictureTime,
};
});
return ret.picture;
}
catch (e) {
if (eventSnapshot)
throw e;
return this.createErrorImage(e);
}
return picture;
}
async takePicture(options?: RequestPictureOptions): Promise<MediaObject> {
@@ -505,6 +523,7 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {
return this.progressPicture.promise;
}
else {
this.console.error('Snapshot failed', e);
this.errorPicture = singletonPromise(this.errorPicture,
() => this.createTextErrorImage('Snapshot Failed'));
return this.errorPicture.promise;

View File

@@ -1,4 +1,6 @@
import sdk, { FFmpegInput, RecordingStreamThumbnailOptions } from '@scrypted/sdk';
import { Console } from 'console';
import { PassThrough } from 'stream';
import url from 'url';
import type { MIMETypeParameters } from 'whatwg-mimetype';
import { FFmpegImageFilterOptions, ffmpegFilterImage, ffmpegFilterImageBuffer } from './ffmpeg-image-filter';
@@ -71,7 +73,7 @@ export function toImageOp(options: RecordingStreamThumbnailOptions) {
return ret;
}
export async function processImageOp(input: string | FFmpegInput | Buffer, op: ImageOp, time: number, sourceId: string, debugConsole: Console): Promise<Buffer> {
export async function processImageOp(input: string | FFmpegInput | Buffer, op: ImageOp, time: number, sourceId: string, debug?: boolean): Promise<Buffer> {
const { crop, resize } = op;
const { width, height, fractional } = resize || {};
const { left, top, right, bottom, fractional: cropFractional } = crop || {};
@@ -120,8 +122,19 @@ export async function processImageOp(input: string | FFmpegInput | Buffer, op: I
}
}
const out = new PassThrough();
let console = new Console(out, out);
const printConsole = () => {
if (!console)
return;
console = undefined;
const data = out.read().toString();
const deviceConsole = sdk.deviceManager.getMixinConsole(sourceId);
deviceConsole.log(data);
}
const ffmpegOpts: FFmpegImageFilterOptions = {
console: debugConsole,
console,
ffmpegPath: await sdk.mediaManager.getFFmpegPath(),
resize: width === undefined && height === undefined
? undefined
@@ -143,22 +156,32 @@ export async function processImageOp(input: string | FFmpegInput | Buffer, op: I
time,
};
if (Buffer.isBuffer(input)) {
return ffmpegFilterImageBuffer(input, ffmpegOpts);
try {
if (Buffer.isBuffer(input)) {
return await ffmpegFilterImageBuffer(input, ffmpegOpts);
}
const ffmpegInput: FFmpegInput = typeof input !== 'string'
? input
: {
inputArguments: [
'-i', input,
]
};
const args = [
...ffmpegInput.inputArguments,
...(ffmpegInput.h264EncoderArguments || []),
];
return await ffmpegFilterImage(args, ffmpegOpts);
}
catch (e) {
printConsole();
throw e;
}
finally {
if (debug)
printConsole();
}
const ffmpegInput: FFmpegInput = typeof input !== 'string'
? input
: {
inputArguments: [
'-i', input,
]
};
const args = [
...ffmpegInput.inputArguments,
...(ffmpegInput.h264EncoderArguments || []),
];
return ffmpegFilterImage(args, ffmpegOpts);
}

View File

@@ -9,7 +9,7 @@
// "scrypted.serverRoot": "/home/pi/.scrypted",
// lxc installation
"scrypted.debugHost": "scrypted-server",
"scrypted.debugHost": "scrypted-test",
"scrypted.serverRoot": "/root/.scrypted",
// local checkout

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/tensorflow-lite",
"version": "0.1.48",
"version": "0.1.59",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/tensorflow-lite",
"version": "0.1.48",
"version": "0.1.59",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -40,7 +40,7 @@
"arm64": "3.10"
},
"win32": {
"x64": "-3.9"
"x64": "3.9"
}
},
"type": "API",
@@ -53,5 +53,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.48"
"version": "0.1.59"
}

View File

@@ -0,0 +1 @@
../../openvino/src/common

View File

@@ -1,9 +1,9 @@
from __future__ import annotations
import asyncio
import concurrent.futures
import os
import re
import traceback
import urllib.request
from typing import Any, List, Tuple
@@ -12,69 +12,60 @@ from PIL import Image
from scrypted_sdk.types import (ObjectDetectionResult, ObjectDetectionSession,
ObjectsDetected, Setting)
import common.colors
from detect import DetectPlugin
import traceback
from .rectangle import (Rectangle, combine_rect, from_bounding_box,
intersect_area, intersect_rect, to_bounding_box)
# vips is already multithreaded, but needs to be kicked off the python asyncio thread.
toThreadExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="image")
async def to_thread(f):
loop = asyncio.get_running_loop()
return await loop.run_in_executor(toThreadExecutor, f)
async def ensureRGBData(data: bytes, size: Tuple[int, int], format: str):
if format != 'rgba':
return Image.frombuffer('RGB', size, data)
def convert():
rgba = Image.frombuffer('RGBA', size, data)
try:
return rgba.convert('RGB')
finally:
rgba.close()
return await to_thread(convert)
def parse_label_contents(contents: str):
lines = contents.splitlines()
ret = {}
for row_number, content in enumerate(lines):
pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
if len(pair) == 2 and pair[0].strip().isdigit():
ret[int(pair[0])] = pair[1].strip()
else:
ret[row_number] = content.strip()
return ret
class Prediction:
def __init__(self, id: int, score: float, bbox: Tuple[float, float, float, float]):
def __init__(self, id: int, score: float, bbox: Tuple[float, float, float, float], embedding: str = None):
self.id = id
self.score = score
self.bbox = bbox
self.embedding = embedding
class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter):
class PredictPlugin(DetectPlugin):
labels: dict
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
# periodic restart because there seems to be leaks in tflite or coral API.
loop = asyncio.get_event_loop()
loop.call_later(4 * 60 * 60, lambda: self.requestRestart())
# periodic restart of main plugin because there seems to be leaks in tflite or coral API.
if not nativeId:
loop = asyncio.get_event_loop()
loop.call_later(4 * 60 * 60, lambda: self.requestRestart())
self.batch: List[Tuple[Any, asyncio.Future]] = []
self.batching = 0
self.batch_flush = None
def downloadFile(self, url: str, filename: str):
filesPath = os.path.join(os.environ['SCRYPTED_PLUGIN_VOLUME'], 'files')
fullpath = os.path.join(filesPath, filename)
if os.path.isfile(fullpath):
try:
filesPath = os.path.join(os.environ['SCRYPTED_PLUGIN_VOLUME'], 'files')
fullpath = os.path.join(filesPath, filename)
if os.path.isfile(fullpath):
return fullpath
tmp = fullpath + '.tmp'
print("Creating directory for", tmp)
os.makedirs(os.path.dirname(fullpath), exist_ok=True)
print("Downloading", url)
response = urllib.request.urlopen(url)
if response.getcode() < 200 or response.getcode() >= 300:
raise Exception(f"Error downloading")
read = 0
with open(tmp, "wb") as f:
while True:
data = response.read(1024 * 1024)
if not data:
break
read += len(data)
print("Downloaded", read, "bytes")
f.write(data)
os.rename(tmp, fullpath)
return fullpath
os.makedirs(os.path.dirname(fullpath), exist_ok=True)
tmp = fullpath + '.tmp'
urllib.request.urlretrieve(url, tmp)
os.rename(tmp, fullpath)
return fullpath
except:
print("Error downloading", url)
import traceback
traceback.print_exc()
raise
def getClasses(self) -> list[str]:
return list(self.labels.values())
@@ -108,6 +99,8 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter):
obj.bbox.xmin, obj.bbox.ymin, obj.bbox.xmax - obj.bbox.xmin, obj.bbox.ymax - obj.bbox.ymin)
detection['className'] = className
detection['score'] = obj.score
if hasattr(obj, 'embedding') and obj.embedding is not None:
detection['embedding'] = obj.embedding
detections.append(detection)
if convert_to_src_size:
@@ -137,6 +130,41 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter):
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
pass
async def detect_batch(self, inputs: List[Any]) -> List[Any]:
pass
async def run_batch(self):
batch = self.batch
self.batch = []
self.batching = 0
if len(batch):
inputs = [x[0] for x in batch]
try:
results = await self.detect_batch(inputs)
for i, result in enumerate(results):
batch[i][1].set_result(result)
except Exception as e:
for _, future in batch:
future.set_exception(e)
async def flush_batch(self):
self.batch_flush = None
await self.run_batch()
async def queue_batch(self, input: Any) -> List[Any]:
future = asyncio.Future(loop=asyncio.get_event_loop())
self.batch.append((input, future))
if self.batching:
self.batching = self.batching - 1
if self.batching:
# if there is any sort of error or backlog, flush any stragglers after a short delay.
if not self.batch_flush:
self.batch_flush = self.loop.call_later(.5, lambda: asyncio.ensure_future(self.flush_batch()))
return await future
await self.run_batch()
return await future
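# The batching above is a countdown: a caller that plans to submit N frames passes a batch
# hint of N, run_detection_image adds it to self.batching, each queue_batch call decrements
# it, and only when it reaches zero (or the 0.5 second flush timer fires first) does
# run_batch hand all accumulated inputs to detect_batch in one call. A self-contained
# sketch of the same pattern, with detect_batch stubbed out:

import asyncio
from typing import Any, List, Tuple

class BatchingSketch:
    def __init__(self):
        self.batch: List[Tuple[Any, asyncio.Future]] = []
        self.batching = 0

    async def detect_batch(self, inputs: List[Any]) -> List[Any]:
        return [f"result-for-{i}" for i in inputs]  # stand-in for batched inference

    async def run_batch(self):
        batch, self.batch, self.batching = self.batch, [], 0
        if batch:
            try:
                results = await self.detect_batch([i for i, _ in batch])
                for (_, future), result in zip(batch, results):
                    future.set_result(result)
            except Exception as e:
                for _, future in batch:
                    future.set_exception(e)

    async def queue_batch(self, input: Any) -> Any:
        future = asyncio.get_event_loop().create_future()
        self.batch.append((input, future))
        self.batching -= 1
        if self.batching > 0:
            return await future  # more frames expected; the last one flushes
        await self.run_batch()
        return await future

async def main():
    s = BatchingSketch()
    s.batching = 2  # hint: two frames are on the way
    print(await asyncio.gather(s.queue_batch("frame1"), s.queue_batch("frame2")))

asyncio.run(main())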
async def safe_detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
try:
f = self.detect_once(input, settings, src_size, cvss)
@@ -151,26 +179,42 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter):
async def run_detection_image(self, image: scrypted_sdk.Image, detection_session: ObjectDetectionSession) -> ObjectsDetected:
settings = detection_session and detection_session.get('settings')
batch = (detection_session and detection_session.get('batch')) or 0
self.batching += batch
iw, ih = image.width, image.height
w, h = self.get_input_size()
resize = None
xs = w / iw
ys = h / ih
def cvss(point):
return point[0] / xs, point[1] / ys
if w is None or h is None:
resize = None
w = image.width
h = image.height
def cvss(point):
return point
else:
resize = None
xs = w / iw
ys = h / ih
def cvss(point):
return point[0] / xs, point[1] / ys
if iw != w or ih != h:
resize = {
'width': w,
'height': h,
}
if iw != w or ih != h:
resize = {
'width': w,
'height': h,
}
format = image.format or self.get_input_format()
b = await image.toBuffer({
'resize': resize,
'format': image.format or 'rgb',
'format': format,
})
data = await ensureRGBData(b, (w, h), image.format)
if self.get_input_format() == 'rgb':
data = await common.colors.ensureRGBData(b, (w, h), format)
elif self.get_input_format() == 'rgba':
data = await common.colors.ensureRGBAData(b, (w, h), format)
try:
ret = await self.safe_detect_once(data, settings, (iw, ih), cvss)
return ret

View File

@@ -0,0 +1,230 @@
from __future__ import annotations
import asyncio
from asyncio import Future
import base64
import concurrent.futures
import os
from typing import Any, Tuple, List
import numpy as np
# import Quartz
import scrypted_sdk
# from Foundation import NSData, NSMakeSize
from PIL import Image
from scrypted_sdk import (
Setting,
SettingValue,
ObjectDetectionSession,
ObjectsDetected,
ObjectDetectionResult,
)
import traceback
# import Vision
from predict import PredictPlugin
from common import yolo
from common.text import prepare_text_result, process_text_result
def euclidean_distance(arr1, arr2):
return np.linalg.norm(arr1 - arr2)
def cosine_similarity(vector_a, vector_b):
dot_product = np.dot(vector_a, vector_b)
norm_a = np.linalg.norm(vector_a)
norm_b = np.linalg.norm(vector_b)
similarity = dot_product / (norm_a * norm_b)
return similarity
predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "Recognize")
class RecognizeDetection(PredictPlugin):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
self.inputheight = 640
self.inputwidth = 640
self.labels = {
0: "face",
1: "plate",
2: "text",
}
self.loop = asyncio.get_event_loop()
self.minThreshold = 0.7
self.detectModel = self.downloadModel("scrypted_yolov9c_flt")
self.textModel = self.downloadModel("vgg_english_g2")
self.faceModel = self.downloadModel("inception_resnet_v1")
def downloadModel(self, model: str):
pass
async def getSettings(self) -> list[Setting]:
pass
async def putSetting(self, key: str, value: SettingValue):
self.storage.setItem(key, value)
await self.onDeviceEvent(scrypted_sdk.ScryptedInterface.Settings.value, None)
await scrypted_sdk.deviceManager.requestRestart()
# width, height, channels
def get_input_details(self) -> Tuple[int, int, int]:
return (self.inputwidth, self.inputheight, 3)
def get_input_size(self) -> Tuple[float, float]:
return (self.inputwidth, self.inputheight)
def get_input_format(self) -> str:
return "rgb"
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
results = await asyncio.get_event_loop().run_in_executor(
predictExecutor, lambda: self.predictDetectModel(input)
)
objs = yolo.parse_yolov9(results)
ret = self.create_detection_result(objs, src_size, cvss)
return ret
async def setEmbedding(self, d: ObjectDetectionResult, image: scrypted_sdk.Image):
try:
l, t, w, h = d["boundingBox"]
face = await image.toBuffer(
{
"crop": {
"left": l,
"top": t,
"width": w,
"height": h,
},
"resize": {
"width": 160,
"height": 160,
},
"format": "rgb",
}
)
faceImage = Image.frombuffer("RGB", (160, 160), face)
image_tensor = np.array(faceImage).astype(np.float32).transpose([2, 0, 1])
processed_tensor = (image_tensor - 127.5) / 128.0
processed_tensor = np.expand_dims(processed_tensor, axis=0)
output = await asyncio.get_event_loop().run_in_executor(
predictExecutor,
lambda: self.predictFaceModel(processed_tensor)
)
b = output.tobytes()
embedding = base64.b64encode(b).decode("utf-8")
d["embedding"] = embedding
except Exception as e:
traceback.print_exc()
pass
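# The (x - 127.5) / 128.0 step above is the fixed standardization commonly paired with
# InceptionResnetV1-style face embedders, mapping 8-bit pixels into roughly [-1, 1]:

pixels = np.array([0, 127.5, 255], dtype=np.float32)
print((pixels - 127.5) / 128.0)  # [-0.99609375  0.  0.99609375]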
def predictTextModel(self, input):
pass
def predictDetectModel(self, input):
pass
def predictFaceModel(self, input):
pass
async def setLabel(self, d: ObjectDetectionResult, image: scrypted_sdk.Image):
try:
image_tensor = await prepare_text_result(d, image)
preds = await asyncio.get_event_loop().run_in_executor(
predictExecutor,
lambda: self.predictTextModel(image_tensor),
)
d['label'] = process_text_result(preds)
except Exception as e:
traceback.print_exc()
pass
async def run_detection_image(
self, image: scrypted_sdk.Image, detection_session: ObjectDetectionSession
) -> ObjectsDetected:
ret = await super().run_detection_image(image, detection_session)
detections = ret["detections"]
# non max suppression on detections
for i in range(len(detections)):
d1 = detections[i]
if d1["score"] < self.minThreshold:
continue
for j in range(i + 1, len(detections)):
d2 = detections[j]
if d2["score"] < self.minThreshold:
continue
if d1["className"] != d2["className"]:
continue
l1, t1, w1, h1 = d1["boundingBox"]
l2, t2, w2, h2 = d2["boundingBox"]
r1 = l1 + w1
b1 = t1 + h1
r2 = l2 + w2
b2 = t2 + h2
left = max(l1, l2)
top = max(t1, t2)
right = min(r1, r2)
bottom = min(b1, b2)
if left < right and top < bottom:
area1 = (r1 - l1) * (b1 - t1)
area2 = (r2 - l2) * (b2 - t2)
intersect = (right - left) * (bottom - top)
iou = intersect / (area1 + area2 - intersect)
if iou > 0.5:
if d1["score"] > d2["score"]:
d2["score"] = 0
else:
d1["score"] = 0
# drop suppressed detections (score zeroed above) and anything below the threshold
ret["detections"] = [d for d in detections if d["score"] >= self.minThreshold]
futures: List[Future] = []
for d in ret["detections"]:
if d["className"] == "face":
futures.append(asyncio.ensure_future(self.setEmbedding(d, image)))
elif d["className"] == "plate":
futures.append(asyncio.ensure_future(self.setLabel(d, image)))
if len(futures):
await asyncio.wait(futures)
last = None
for d in ret['detections']:
if d["className"] != "face":
continue
check = d.get("embedding")
if check is None:
continue
# decode the base64-encoded embedding
embedding = base64.b64decode(check)
embedding = np.frombuffer(embedding, dtype=np.float32)
if last is None:
last = embedding
continue
# debug: log the similarity between consecutive face embeddings
similarity = cosine_similarity(last, embedding)
print('similarity', similarity)
last = embedding
return ret
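# The suppression pass above computes pairwise IoU (intersection over union) for same-class
# boxes and zeroes the weaker score whenever IoU exceeds 0.5. A concrete instance of the
# arithmetic:

# Two same-class boxes in (left, top, width, height) form.
l1, t1, w1, h1 = 0, 0, 10, 10  # area 100
l2, t2, w2, h2 = 5, 5, 10, 10  # area 100

left, top = max(l1, l2), max(t1, t2)                           # 5, 5
right, bottom = min(l1 + w1, l2 + w2), min(t1 + h1, t2 + h2)   # 10, 10
intersect = max(0, right - left) * max(0, bottom - top)        # 25
iou = intersect / (100 + 100 - intersect)                      # 25 / 175 ~= 0.143
print(iou)  # under the 0.5 cutoff, so both detections survive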

View File

@@ -5,7 +5,7 @@ import json
from PIL import Image
from pycoral.adapters import detect
from .common import *
from .tflite_common import *
loaded_py_coral = False
try:
@@ -26,12 +26,28 @@ import scrypted_sdk
import tflite_runtime.interpreter as tflite
from scrypted_sdk.types import Setting, SettingValue
import yolo
from common import yolo
from predict import PredictPlugin
availableModels = [
"Default",
"ssd_mobilenet_v2_coco_quant_postprocess",
"tf2_ssd_mobilenet_v2_coco17_ptq",
"ssdlite_mobiledet_coco_qat_postprocess",
"scrypted_yolov6n_320",
"scrypted_yolov6s_320",
"scrypted_yolov9c_320",
"scrypted_yolov8n_320",
"efficientdet_lite0_320_ptq",
"efficientdet_lite1_384_ptq",
"efficientdet_lite2_448_ptq",
"efficientdet_lite3_512_ptq",
"efficientdet_lite3x_640_ptq",
]
def parse_label_contents(contents: str):
lines = contents.splitlines()
lines = [line for line in lines if line.strip()]
ret = {}
for row_number, content in enumerate(lines):
pair = re.split(r"[:\s]+", content.strip(), maxsplit=1)
@@ -59,30 +75,39 @@ class TensorFlowLitePlugin(
edge_tpus = None
pass
model_version = "v12"
model_version = "v13"
model = self.storage.getItem("model") or "Default"
if model not in availableModels:
self.storage.setItem("model", "Default")
model = "Default"
defaultModel = model == "Default"
branch = "main"
labelsFile = None
def configureModel():
nonlocal labelsFile
nonlocal model
if defaultModel:
if edge_tpus and next((obj for obj in edge_tpus if obj['type'] == 'usb'), None):
model = "yolov8n_full_integer_quant_320"
# this model seems completely wacky with lots of false positives.
# might be broken?
# model = "ssdlite_mobiledet_coco_qat_postprocess"
if edge_tpus and next(
(obj for obj in edge_tpus if obj["type"] == "usb"), None
):
model = "ssdlite_mobiledet_coco_qat_postprocess"
else:
model = "efficientdet_lite0_320_ptq"
self.yolo = "yolo" in model
self.yolov8 = "yolov8" in model
self.yolov9 = "yolov9" in model
self.scrypted_model = "scrypted" in model
print(f'model: {model}')
print(f"model: {model}")
if self.yolo:
if self.scrypted_model:
labelsFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/tflite-models/{branch}/scrypted_labels.txt",
f"{model_version}/scrypted_labels.txt",
)
elif self.yolo:
labelsFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/tflite-models/{branch}/coco_80cl.txt",
f"{model_version}/coco_80cl.txt",
@@ -100,9 +125,10 @@ class TensorFlowLitePlugin(
self.interpreter_count = 0
def downloadModel():
tflite_model = "best_full_integer_quant" if self.scrypted_model else model
return self.downloadFile(
f"https://github.com/koush/tflite-models/raw/{branch}/{model}/{model}{suffix}.tflite",
f"{model_version}/{model}{suffix}.tflite",
f"https://github.com/koush/tflite-models/raw/{branch}/{model}/{tflite_model}{suffix}.tflite",
f"{model_version}/{tflite_model}{suffix}.tflite",
)
try:
@@ -168,19 +194,7 @@ class TensorFlowLitePlugin(
"key": "model",
"title": "Model",
"description": "The detection model used to find objects.",
"choices": [
"Default",
"ssd_mobilenet_v2_coco_quant_postprocess",
"tf2_ssd_mobilenet_v2_coco17_ptq",
"ssdlite_mobiledet_coco_qat_postprocess",
"yolov8n_full_integer_quant",
"yolov8n_full_integer_quant_320",
"efficientdet_lite0_320_ptq",
"efficientdet_lite1_384_ptq",
"efficientdet_lite2_448_ptq",
"efficientdet_lite3_512_ptq",
"efficientdet_lite3x_640_ptq",
],
"choices": availableModels,
"value": model,
},
]
@@ -197,12 +211,12 @@ class TensorFlowLitePlugin(
interpreter = self.interpreters.get()
try:
if self.yolo:
tensor_index = input_details(interpreter, 'index')
tensor_index = input_details(interpreter, "index")
im = np.stack([input])
i = interpreter.get_input_details()[0]
if i['dtype'] == np.int8:
scale, zero_point = i['quantization']
if i["dtype"] == np.int8:
scale, zero_point = i["quantization"]
if scale == 0.003986024297773838 and zero_point == -128:
# fast path for a quantization scale of roughly 1/255 (0.003986024297773838 in these models)
im = im.view(np.int8)
@@ -217,18 +231,23 @@ class TensorFlowLitePlugin(
interpreter.invoke()
output_details = interpreter.get_output_details()
output = output_details[0]
x = interpreter.get_tensor(output['index'])
x = interpreter.get_tensor(output["index"])
input_scale = self.get_input_details()[0]
if x.dtype == np.int8:
scale, zero_point = output['quantization']
scale, zero_point = output["quantization"]
threshold = yolo.defaultThreshold / scale + zero_point
combined_scale = scale * input_scale
objs = yolo.parse_yolov8(x[0], threshold, scale=lambda v: (v - zero_point) * combined_scale, confidence_scale=lambda v: (v - zero_point) * scale)
objs = yolo.parse_yolov9(
x[0],
threshold,
scale=lambda v: (v - zero_point) * combined_scale,
confidence_scale=lambda v: (v - zero_point) * scale,
)
else:
# this code path is unused.
objs = yolo.parse_yolov8(x[0], scale=lambda v: v * input_scale)
objs = yolo.parse_yolov9(x[0], scale=lambda v: v * input_scale)
else:
common.set_input(interpreter, input)
tflite_common.set_input(interpreter, input)
interpreter.invoke()
objs = detect.get_objects(
interpreter, score_threshold=0.2, image_scale=(1, 1)
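# Dequantizing an int8 tensor follows real = (q - zero_point) * scale; the code above
# inverts that map once for the threshold (threshold / scale + zero_point) so candidate
# filtering can run on raw int8 values, and only the survivors are scaled back to floats
# (with the scale additionally folded together with the input size for box coordinates).
# A worked sketch with illustrative quantization parameters:

import numpy as np

scale, zero_point = 0.004, -128  # illustrative values
threshold = 0.2

q_threshold = threshold / scale + zero_point  # 0.2 / 0.004 - 128 = -78.0

q = np.array([-100, -78, -50, 0], dtype=np.int8)
keep = q > q_threshold  # cheap comparison in the integer domain

real = (q[keep].astype(np.float32) - zero_point) * scale
print(q_threshold, real)  # -78.0 [0.312 0.512]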

View File

@@ -1 +0,0 @@
../../openvino/src/yolo

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/unifi-protect",
"version": "0.0.145",
"version": "0.0.146",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/unifi-protect",
"version": "0.0.145",
"version": "0.0.146",
"license": "Apache",
"dependencies": {
"@koush/unifi-protect": "file:../../external/unifi-protect",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/unifi-protect",
"version": "0.0.145",
"version": "0.0.146",
"description": "Unifi Protect Plugin for Scrypted",
"author": "Scrypted",
"license": "Apache",

View File

@@ -182,12 +182,27 @@ export class UnifiProtect extends ScryptedDeviceBase implements Settings, Device
let detections: ObjectDetectionResult[] = [];
// const event = {
// type: 'smartDetectZone',
// start: 1713211066646,
// score: 80,
// smartDetectTypes: [ 'licensePlate', 'vehicle' ],
// smartDetectEvents: [],
// metadata: { licensePlate: { name: 'ABCDEFG', confidenceLevel: 90 } },
// camera: '64b2e59f0106eb03e4001210',
// partition: null,
// user: null,
// id: '661d86bf03e69c03e408d62a',
// modelKey: 'event'
// }
if (payload.type === 'smartDetectZone' || payload.type === 'smartDetectLine') {
unifiCamera.resetDetectionTimeout();
detections = payload.smartDetectTypes.map(type => ({
className: type,
score: payload.score,
label: (payload as any).metadata?.[type]?.name,
}));
}
else {

Some files were not shown because too many files have changed in this diff.