Compare commits


1 Commit

Author          SHA1         Message      Date
Koushik Dutta   fe165295fb   postrelease  2024-03-23 12:34:03 -07:00
122 changed files with 1000 additions and 3174 deletions

View File

@@ -1,11 +1,11 @@
name: Build changed plugins
on:
# push:
# branches: ["main"]
# paths: ["plugins/**"]
# pull_request:
# paths: ["plugins/**"]
push:
branches: ["main"]
paths: ["plugins/**"]
pull_request:
paths: ["plugins/**"]
workflow_dispatch:
jobs:

View File

@@ -1 +0,0 @@
../../../../sdk/dist/src/settings-mixin.d.ts

View File

@@ -1 +0,0 @@
../../../../sdk/dist/src/storage-settings.d.ts

View File

@@ -1,10 +1,9 @@
import sdk, { MixinDeviceBase, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, ScryptedInterfaceDescriptors, ScryptedMimeTypes } from "@scrypted/sdk";
import { StorageSettings } from "@scrypted/sdk/storage-settings";
import { SettingsMixinDeviceBase } from "@scrypted/sdk/settings-mixin";
import sdk, { MixinDeviceBase, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, ScryptedInterfaceDescriptors } from "@scrypted/sdk";
import fs from 'fs';
import type { TranspileOptions } from "typescript";
import vm from "vm";
import { ScriptDevice } from "./monaco/script-device";
import path from 'path';
const { systemManager, deviceManager, mediaManager, endpointManager } = sdk;
@@ -29,13 +28,9 @@ export function readFileAsString(f: string) {
}
function getTypeDefs() {
const settingsMixinDefs = readFileAsString('@types/sdk/settings-mixin.d.ts');
const storageSettingsDefs = readFileAsString('@types/sdk/storage-settings.d.ts');
const scryptedTypesDefs = readFileAsString('@types/sdk/types.d.ts');
const scryptedIndexDefs = readFileAsString('@types/sdk/index.d.ts');
return {
settingsMixinDefs,
storageSettingsDefs,
scryptedIndexDefs,
scryptedTypesDefs,
};
@@ -69,7 +64,6 @@ export async function scryptedEval(device: ScryptedDeviceBase, script: string, e
fs: require('realfs'),
ScryptedDeviceBase,
MixinDeviceBase,
StorageSettings,
systemManager,
deviceManager,
endpointManager,
@@ -79,8 +73,6 @@ export async function scryptedEval(device: ScryptedDeviceBase, script: string, e
localStorage: device.storage,
device,
exports: {} as any,
SettingsMixinDeviceBase,
ScryptedMimeTypes,
ScryptedInterface,
ScryptedDeviceType,
// @ts-expect-error
@@ -181,16 +173,6 @@ export function createMonacoEvalDefaults(extraLibs: { [lib: string]: string }) {
"node_modules/@types/scrypted__sdk/types/index.d.ts"
);
monaco.languages.typescript.typescriptDefaults.addExtraLib(
libs['settingsMixin'],
"node_modules/@types/scrypted__sdk/settings-mixin.d.ts"
);
monaco.languages.typescript.typescriptDefaults.addExtraLib(
libs['storageSettings'],
"node_modules/@types/scrypted__sdk/storage-settings.d.ts"
);
monaco.languages.typescript.typescriptDefaults.addExtraLib(
libs['sdk'],
"node_modules/@types/scrypted__sdk/index.d.ts"

View File

@@ -136,17 +136,12 @@ export async function readLine(readable: Readable) {
}
export async function readString(readable: Readable | Promise<Readable>) {
const buffer = await readBuffer(readable);
return buffer.toString();
}
export async function readBuffer(readable: Readable | Promise<Readable>) {
const buffers: Buffer[] = [];
let data = '';
readable = await readable;
readable.on('data', buffer => {
buffers.push(buffer);
data += buffer.toString();
});
readable.resume();
await once(readable, 'end')
return Buffer.concat(buffers);
return data;
}
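
The rewrite above has readBuffer collect Buffer chunks and concatenate once at the end instead of accumulating a decoded string, so multi-byte UTF-8 sequences can no longer be split across chunk boundaries and binary payloads survive intact. A self-contained sketch of the same pattern, using only Node built-ins:

import { once } from "events";
import { Readable } from "stream";

// Collect raw chunks and concatenate once at the end. Decoding, if any,
// happens only after the full payload is assembled.
async function readBuffer(readable: Readable): Promise<Buffer> {
    const buffers: Buffer[] = [];
    readable.on("data", buffer => buffers.push(buffer));
    readable.resume();
    await once(readable, "end");
    return Buffer.concat(buffers);
}

// usage: const text = (await readBuffer(stream)).toString();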

View File

@@ -1,7 +1,6 @@
{
"compilerOptions": {
"module": "Node16",
"moduleResolution": "Node16",
"module": "commonjs",
"target": "esnext",
"noImplicitAny": true,
"outDir": "./dist",

View File

@@ -1,6 +1,6 @@
# Home Assistant Addon Configuration
name: Scrypted
version: "20-jammy-full.s6-v0.97.0"
version: "18-jammy-full.s6-v0.93.0"
slug: scrypted
description: Scrypted is a high performance home video integration and automation platform
url: "https://github.com/koush/scrypted"

View File

@@ -7,8 +7,7 @@
# install script.
################################################################
ARG BASE="jammy"
ARG REPO="ubuntu"
FROM ${REPO}:${BASE} as header
FROM ubuntu:${BASE} as header
ENV DEBIAN_FRONTEND=noninteractive

View File

@@ -4,11 +4,11 @@ WORKDIR /
# Install miniconda
ENV CONDA_DIR /opt/conda
RUN apt update -y && apt -y install wget && wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
RUN wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh && \
/bin/bash ~/miniconda.sh -b -p /opt/conda
# Put conda in path so we can use conda activate
ENV PATH=$CONDA_DIR/bin:$PATH
RUN conda -y install -c conda-forge cudatoolkit cudnn
RUN conda install -c conda-forge cudatoolkit=11.2.2 cudnn=8.1.0
ENV CONDA_PREFIX=/opt/conda
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib/

View File

@@ -10,7 +10,7 @@ function readyn() {
}
cd /tmp
SCRYPTED_VERSION=v0.96.0
SCRYPTED_VERSION=v0.93.0
SCRYPTED_TAR_ZST=scrypted-$SCRYPTED_VERSION.tar.zst
if [ -z "$VMID" ]
then

View File

@@ -1,12 +1,12 @@
{
"name": "scrypted",
"version": "1.3.14",
"version": "1.3.13",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "scrypted",
"version": "1.3.14",
"version": "1.3.13",
"license": "ISC",
"dependencies": {
"@scrypted/client": "^1.3.3",

View File

@@ -1,6 +1,6 @@
{
"name": "scrypted",
"version": "1.3.14",
"version": "1.3.13",
"description": "",
"main": "./dist/packages/cli/src/main.js",
"bin": {

View File

@@ -133,7 +133,11 @@ export async function serveMain(installVersion?: string) {
await startServer(installDir);
if (fs.existsSync(UPDATE_FILE)) {
if (fs.existsSync(EXIT_FILE)) {
console.log('Exiting.');
process.exit(1);
}
else if (fs.existsSync(UPDATE_FILE)) {
console.log('Update requested. Installing.');
await runCommandEatError('npm', '--prefix', installDir, 'install', '--production', '@scrypted/server@latest').catch(e => {
console.error('Update failed', e);
@@ -141,10 +145,6 @@ export async function serveMain(installVersion?: string) {
console.log('Exiting.');
process.exit(1);
}
else if (fs.existsSync(EXIT_FILE)) {
console.log('Exiting.');
process.exit(1);
}
else {
console.log(`Service unexpectedly exited. Restarting momentarily.`);
await sleep(10000);
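
The reordering above gives the exit sentinel precedence: previously a pending update file would trigger an install even when an exit had also been requested. A minimal sketch of the check order, with hypothetical sentinel paths:

import fs from "fs";

// Hypothetical sentinel paths; the real files live under the CLI's
// install directory.
const EXIT_FILE = "/tmp/scrypted.exit";
const UPDATE_FILE = "/tmp/scrypted.update";

function onServerExit() {
    // An explicit exit request now wins over a queued update.
    if (fs.existsSync(EXIT_FILE)) {
        console.log("Exiting.");
        process.exit(1);
    }
    if (fs.existsSync(UPDATE_FILE)) {
        console.log("Update requested. Installing.");
        // ... npm install @scrypted/server@latest, then loop and restart
    }
}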

View File

@@ -1,4 +1,4 @@
{
"scrypted.debugHost": "127.0.0.1",
"scrypted.debugHost": "scrypted-server",
}

View File

@@ -1,11 +1,6 @@
<details>
<summary>Changelog</summary>
### 0.3.1
alexa/google-home: fix potential vulnerability. do not allow local network control using cloud tokens belonging to a different user. the plugins are now locked to a specific scrypted cloud account once paired.
### 0.3.0
alexa/google-home: additional auth token checks to harden endpoints for cloud sharing

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/alexa",
"version": "0.3.2",
"version": "0.3.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/alexa",
"version": "0.3.2",
"version": "0.3.1",
"dependencies": {
"axios": "^1.3.4",
"uuid": "^9.0.0"

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/alexa",
"version": "0.3.2",
"version": "0.3.1",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",
"prescrypted-setup-project": "scrypted-package-json",

View File

@@ -27,7 +27,6 @@ class AlexaPlugin extends ScryptedDeviceBase implements HttpRequestHandler, Mixi
json: true
},
syncedDevices: {
defaultValue: [],
multiple: true,
hide: true
},
@@ -67,10 +66,7 @@ class AlexaPlugin extends ScryptedDeviceBase implements HttpRequestHandler, Mixi
alexaHandlers.set('Alexa.Authorization/AcceptGrant', this.onAlexaAuthorization);
alexaHandlers.set('Alexa.Discovery/Discover', this.onDiscoverEndpoints);
this.start()
.catch(e => {
this.console.error('startup failed', e);
})
this.start();
}
async start() {

View File

@@ -10,7 +10,7 @@
"port": 10081,
"request": "attach",
"skipFiles": [
"**/plugin-console.*",
"**/plugin-remote-worker.*",
"<node_internals>/**"
],
"preLaunchTask": "scrypted: deploy+debug",

View File

@@ -1,21 +1,19 @@
{
"name": "@scrypted/amcrest",
"version": "0.0.150",
"version": "0.0.135",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/amcrest",
"version": "0.0.150",
"version": "0.0.135",
"license": "Apache",
"dependencies": {
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk",
"content-type": "^1.0.5"
"@scrypted/sdk": "file:../../sdk"
},
"devDependencies": {
"@types/content-type": "^1.1.8",
"@types/node": "^20.11.30"
"@types/node": "^20.10.8"
}
},
"../../common": {
@@ -25,22 +23,23 @@
"dependencies": {
"@scrypted/sdk": "file:../sdk",
"@scrypted/server": "file:../server",
"http-auth-utils": "^5.0.1",
"http-auth-utils": "^3.0.2",
"node-fetch-commonjs": "^3.1.1",
"typescript": "^5.3.3"
},
"devDependencies": {
"@types/node": "^20.11.0",
"@types/node": "^20.10.8",
"ts-node": "^10.9.2"
}
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.3.29",
"version": "0.3.4",
"license": "ISC",
"dependencies": {
"@babel/preset-typescript": "^7.18.6",
"adm-zip": "^0.4.13",
"axios": "^1.6.5",
"axios": "^0.21.4",
"babel-loader": "^9.1.0",
"babel-plugin-const-enum": "^1.1.0",
"esbuild": "^0.15.9",
@@ -78,29 +77,15 @@
"resolved": "../../sdk",
"link": true
},
"node_modules/@types/content-type": {
"version": "1.1.8",
"resolved": "https://registry.npmjs.org/@types/content-type/-/content-type-1.1.8.tgz",
"integrity": "sha512-1tBhmVUeso3+ahfyaKluXe38p+94lovUZdoVfQ3OnJo9uJC42JT7CBoN3k9HYhAae+GwiBYmHu+N9FZhOG+2Pg==",
"dev": true
},
"node_modules/@types/node": {
"version": "20.11.30",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
"integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
"version": "20.10.8",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.10.8.tgz",
"integrity": "sha512-f8nQs3cLxbAFc00vEU59yf9UyGUftkPaLGfvbVOIDdx2i1b8epBqj2aNGyP19fiyXWvlmZ7qC1XLjAzw/OKIeA==",
"dev": true,
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/amcrest",
"version": "0.0.150",
"version": "0.0.135",
"description": "Amcrest Plugin for Scrypted",
"author": "Scrypted",
"license": "Apache",
@@ -36,11 +36,9 @@
},
"dependencies": {
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk",
"content-type": "^1.0.5"
"@scrypted/sdk": "file:../../sdk"
},
"devDependencies": {
"@types/content-type": "^1.1.8",
"@types/node": "^20.11.30"
"@types/node": "^20.10.8"
}
}

View File

@@ -1,140 +1,10 @@
import { AuthFetchCredentialState, HttpFetchOptions, authHttpFetch } from '@scrypted/common/src/http-auth-fetch';
import { readLine } from '@scrypted/common/src/read-stream';
import { parseHeaders, readBody } from '@scrypted/common/src/rtsp-server';
import contentType from 'content-type';
import { IncomingMessage } from 'http';
import { EventEmitter, Readable } from 'stream';
import { Destroyable } from '../../rtsp/src/rtsp';
import { Readable } from 'stream';
import { getDeviceInfo } from './probe';
import { Point } from '@scrypted/sdk';
// Human
// {
// "Action" : "Cross",
// "Class" : "Normal",
// "CountInGroup" : 1,
// "DetectRegion" : [
// [ 455, 260 ],
// [ 3586, 260 ],
// [ 3768, 7580 ],
// [ 382, 7451 ]
// ],
// "Direction" : "Enter",
// "EventID" : 10181,
// "GroupID" : 0,
// "Name" : "Rule1",
// "Object" : {
// "Action" : "Appear",
// "BoundingBox" : [ 2856, 1280, 3880, 4880 ],
// "Center" : [ 3368, 3080 ],
// "Confidence" : 0,
// "LowerBodyColor" : [ 0, 0, 0, 0 ],
// "MainColor" : [ 0, 0, 0, 0 ],
// "ObjectID" : 863,
// "ObjectType" : "Human",
// "RelativeID" : 0,
// "Speed" : 0
// },
// "PTS" : 43380319830.0,
// "RuleID" : 2,
// "Track" : [],
// "UTC" : 1711446999,
// "UTCMS" : 701
// }
// Face
// {
// "CfgRuleId" : 1,
// "Class" : "FaceDetection",
// "CountInGroup" : 2,
// "DetectRegion" : null,
// "EventID" : 10360,
// "EventSeq" : 6,
// "Faces" : [
// {
// "BoundingBox" : [ 1504, 2336, 1728, 2704 ],
// "Center" : [ 1616, 2520 ],
// "ObjectID" : 94,
// "ObjectType" : "HumanFace",
// "RelativeID" : 0
// }
// ],
// "FrameSequence" : 8251212,
// "GroupID" : 6,
// "Mark" : 0,
// "Name" : "FaceDetection",
// "Object" : {
// "Action" : "Appear",
// "BoundingBox" : [ 1504, 2336, 1728, 2704 ],
// "Center" : [ 1616, 2520 ],
// "Confidence" : 19,
// "FrameSequence" : 8251212,
// "ObjectID" : 94,
// "ObjectType" : "HumanFace",
// "RelativeID" : 0,
// "SerialUUID" : "",
// "Source" : 0.0,
// "Speed" : 0,
// "SpeedTypeInternal" : 0
// },
// "Objects" : [
// {
// "Action" : "Appear",
// "BoundingBox" : [ 1504, 2336, 1728, 2704 ],
// "Center" : [ 1616, 2520 ],
// "Confidence" : 19,
// "FrameSequence" : 8251212,
// "ObjectID" : 94,
// "ObjectType" : "HumanFace",
// "RelativeID" : 0,
// "SerialUUID" : "",
// "Source" : 0.0,
// "Speed" : 0,
// "SpeedTypeInternal" : 0
// }
// ],
// "PTS" : 43774941350.0,
// "Priority" : 0,
// "RuleID" : 1,
// "RuleId" : 1,
// "Source" : -1280470024.0,
// "UTC" : 947510337,
// "UTCMS" : 0
// }
export interface AmcrestObjectDetails {
Action: string;
BoundingBox: Point;
Center: Point;
Confidence: number;
LowerBodyColor: [number, number, number, number];
MainColor: [number, number, number, number];
ObjectID: number;
ObjectType: string;
RelativeID: number;
Speed: number;
}
export interface AmcrestEventData {
Action: string;
Class: string;
CountInGroup: number;
DetectRegion: Point[];
Direction: string;
EventID: number;
GroupID: number;
Name: string;
Object: AmcrestObjectDetails;
PTS: number;
RuleID: number;
Track: any[];
UTC: number;
UTCMS: number;
}
export enum AmcrestEvent {
MotionStart = "Code=VideoMotion;action=Start",
MotionStop = "Code=VideoMotion;action=Stop",
MotionInfo = "Code=VideoMotionInfo;action=State",
AudioStart = "Code=AudioMutation;action=Start",
AudioStop = "Code=AudioMutation;action=Stop",
TalkInvite = "Code=_DoTalkAction_;action=Invite",
@@ -148,33 +18,8 @@ export enum AmcrestEvent {
DahuaTalkHangup = "Code=PassiveHungup;action=Start",
DahuaCallDeny = "Code=HungupPhone;action=Pulse",
DahuaTalkPulse = "Code=_CallNoAnswer_;action=Pulse",
FaceDetection = "Code=FaceDetection;action=Start",
SmartMotionHuman = "Code=SmartMotionHuman;action=Start",
SmartMotionVehicle = "Code=Vehicle;action=Start",
CrossLineDetection = "Code=CrossLineDetection;action=Start",
CrossRegionDetection = "Code=CrossRegionDetection;action=Start",
}
async function readAmcrestMessage(client: Readable): Promise<string[]> {
let currentHeaders: string[] = [];
while (true) {
const originalLine = await readLine(client);
const line = originalLine.trim();
if (!line)
return currentHeaders;
// dahua bugs out and sends message without a newline separating the body:
// Content-Length:39
// Code=AudioMutation;action=Start;index=0
if (!line.includes(':')) {
client.unshift(Buffer.from(originalLine + '\n'));
return currentHeaders;
}
currentHeaders.push(line);
}
}
export class AmcrestCameraClient {
credential: AuthFetchCredentialState;
@@ -233,8 +78,7 @@ export class AmcrestCameraClient {
return response.body;
}
async listenEvents(): Promise<Destroyable> {
const events = new EventEmitter();
async listenEvents() {
const url = `http://${this.ip}/cgi-bin/eventManager.cgi?action=attach&codes=[All]`;
console.log('preparing event listener', url);
@@ -242,117 +86,32 @@ export class AmcrestCameraClient {
url,
responseType: 'readable',
});
const stream: IncomingMessage = response.body;
(events as any).destroy = () => {
stream.destroy();
events.removeAllListeners();
};
stream.on('close', () => {
events.emit('close');
});
stream.on('end', () => {
events.emit('end');
});
stream.on('error', e => {
events.emit('error', e);
});
const stream = response.body;
stream.socket.setKeepAlive(true);
const ct = stream.headers['content-type'];
// make content type parsable as content disposition filename
const cd = contentType.parse(ct);
let { boundary } = cd.parameters;
boundary = `--${boundary}`;
const boundaryEnd = `${boundary}--`;
(async () => {
while (true) {
let ignore = await readLine(stream);
ignore = ignore.trim();
if (!ignore)
continue;
if (ignore === boundaryEnd)
continue;
// dahua bugs out and sends this.
if (ignore === 'HTTP/1.1 200 OK') {
const message = await readAmcrestMessage(stream);
this.console.log('ignoring dahua http message', message);
message.unshift('');
const headers = parseHeaders(message);
const body = await readBody(stream, headers);
if (body)
this.console.log('ignoring dahua http body', body);
continue;
}
if (ignore !== boundary) {
this.console.error('expected boundary but found', ignore);
this.console.error(response.headers);
throw new Error('expected boundary');
}
const message = await readAmcrestMessage(stream);
events.emit('data', message);
message.unshift('');
const headers = parseHeaders(message);
const body = await readBody(stream, headers);
const data = body.toString();
events.emit('data', data);
const parts = data.split(';');
let index: string;
try {
for (const part of parts) {
if (part.startsWith('index')) {
index = part.split('=')[1]?.trim();
}
}
}
catch (e) {
this.console.error('error parsing index', data);
}
let jsonData: any;
try {
for (const part of parts) {
if (part.startsWith('data')) {
jsonData = JSON.parse(part.split('=')[1]?.trim());
}
}
}
catch (e) {
this.console.error('error parsing data', data);
}
for (const event of Object.values(AmcrestEvent)) {
if (data.indexOf(event) !== -1) {
events.emit('event', event, index, data);
if (event === AmcrestEvent.SmartMotionHuman) {
events.emit('smart', 'person', jsonData);
}
else if (event === AmcrestEvent.SmartMotionVehicle) {
events.emit('smart', 'car', jsonData);
}
else if (event === AmcrestEvent.FaceDetection) {
events.emit('smart', 'face', jsonData);
}
else if (event === AmcrestEvent.CrossLineDetection || event === AmcrestEvent.CrossRegionDetection) {
const eventData: AmcrestEventData = jsonData;
if (eventData?.Object?.ObjectType === 'Human') {
events.emit('smart', 'person', eventData);
}
else if (eventData?.Object?.ObjectType === 'Vehicle') {
events.emit('smart', 'car', eventData);
}
}
stream.on('data', (buffer: Buffer) => {
const data = buffer.toString();
const parts = data.split(';');
let index: string;
try {
for (const part of parts) {
if (part.startsWith('index')) {
index = part.split('=')[1]?.trim();
}
}
}
})()
.catch(() => stream.destroy());
return events as any as Destroyable;
catch (e) {
this.console.error('error parsing index', data);
}
// this.console?.log('event', data);
for (const event of Object.values(AmcrestEvent)) {
if (data.indexOf(event) !== -1) {
stream.emit('event', event, index, data);
}
}
});
return stream;
}
async enableContinousRecording(channel: number) {
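
The new listenEvents above replaces per-chunk 'data' handling with proper multipart framing: each event is a boundary line, header lines, and a Content-Length body, so events can no longer be split or merged across TCP chunk boundaries, and the parsed index/data fields drive typed 'smart' events. A reduced, self-contained sketch of that framing loop; readLine and readBody are simplified stand-ins for the helpers imported from @scrypted/common, and parseEvents is a hypothetical name:

import { once } from "events";
import { Readable } from "stream";

// Paused-mode line reader; stand-in for readLine from
// @scrypted/common/src/read-stream. EOF handling is simplified.
async function readLine(stream: Readable): Promise<string> {
    const bytes: number[] = [];
    while (true) {
        const b: Buffer | null = stream.read(1);
        if (b === null) {
            if (stream.readableEnded)
                throw new Error("stream ended");
            await once(stream, "readable");
            continue;
        }
        if (b[0] === 0x0a) // '\n'
            return Buffer.from(bytes).toString().trim();
        bytes.push(b[0]);
    }
}

// Read exactly `length` bytes, as readBody does with Content-Length.
async function readBody(stream: Readable, length: number): Promise<Buffer> {
    const chunks: Buffer[] = [];
    let remaining = length;
    while (remaining > 0) {
        const b: Buffer | null = stream.read(Math.min(remaining, 8192));
        if (b === null) {
            await once(stream, "readable");
            continue;
        }
        chunks.push(b);
        remaining -= b.length;
    }
    return Buffer.concat(chunks);
}

// Framing loop: boundary line, header lines, blank line, sized body.
async function parseEvents(stream: Readable, boundary: string,
    onMessage: (headers: Record<string, string>, body: Buffer) => void) {
    while (true) {
        const line = await readLine(stream);
        if (!line || line === `--${boundary}--`)
            continue;
        if (line !== `--${boundary}`)
            throw new Error("expected boundary");
        const headers: Record<string, string> = {};
        while (true) {
            const header = await readLine(stream);
            if (!header)
                break;
            const [key, value] = header.split(":");
            headers[key.trim().toLowerCase()] = value?.trim() ?? "";
        }
        const length = parseInt(headers["content-length"] ?? "0") || 0;
        onMessage(headers, await readBody(stream, length));
    }
}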

View File

@@ -1,11 +1,11 @@
import { ffmpegLogInitialOutput } from '@scrypted/common/src/media-helpers';
import { readLength } from "@scrypted/common/src/read-stream";
import sdk, { Camera, DeviceCreatorSettings, DeviceInformation, FFmpegInput, Intercom, Lock, MediaObject, MediaStreamOptions, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, Reboot, RequestPictureOptions, RequestRecordingStreamOptions, ResponseMediaStreamOptions, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting, VideoCameraConfiguration, VideoRecorder } from "@scrypted/sdk";
import sdk, { Camera, DeviceCreatorSettings, DeviceInformation, FFmpegInput, Intercom, Lock, MediaObject, MediaStreamOptions, Reboot, RequestPictureOptions, RequestRecordingStreamOptions, ResponseMediaStreamOptions, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting, VideoCameraConfiguration, VideoRecorder } from "@scrypted/sdk";
import child_process, { ChildProcess } from 'child_process';
import { PassThrough, Readable, Stream } from "stream";
import { OnvifIntercom } from "../../onvif/src/onvif-intercom";
import { RtspProvider, RtspSmartCamera, UrlMediaStreamOptions } from "../../rtsp/src/rtsp";
import { AmcrestCameraClient, AmcrestEvent, AmcrestEventData } from "./amcrest-api";
import { AmcrestCameraClient, AmcrestEvent } from "./amcrest-api";
const { mediaManager } = sdk;
@@ -22,13 +22,12 @@ function findValue(blob: string, prefix: string, key: string) {
return parts[1];
}
class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration, Camera, Intercom, Lock, VideoRecorder, Reboot, ObjectDetector {
class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration, Camera, Intercom, Lock, VideoRecorder, Reboot {
eventStream: Stream;
cp: ChildProcess;
client: AmcrestCameraClient;
videoStreamOptions: Promise<UrlMediaStreamOptions[]>;
onvifIntercom = new OnvifIntercom(this);
hasSmartDetection: boolean;
constructor(nativeId: string, provider: RtspProvider) {
super(nativeId, provider);
@@ -37,7 +36,6 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
this.storage.removeItem('amcrestDoorbell');
}
this.hasSmartDetection = this.storage.getItem('hasSmartDetection') === 'true';
this.updateDevice();
this.updateDeviceInfo();
}
@@ -186,19 +184,10 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
if (idx.toString() !== channelNumber)
return;
}
if (event === AmcrestEvent.MotionStart
|| event === AmcrestEvent.SmartMotionHuman
|| event === AmcrestEvent.SmartMotionVehicle
|| event === AmcrestEvent.CrossLineDetection
|| event === AmcrestEvent.CrossRegionDetection) {
if (event === AmcrestEvent.MotionStart) {
this.motionDetected = true;
resetMotionTimeout();
}
else if (event === AmcrestEvent.MotionInfo) {
// this seems to be a motion pulse
if (this.motionDetected)
resetMotionTimeout();
}
else if (event === AmcrestEvent.MotionStop) {
// use resetMotionTimeout
}
@@ -242,43 +231,9 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
}
});
events.on('smart', (className: string, data: AmcrestEventData) => {
if (!this.hasSmartDetection) {
this.hasSmartDetection = true;
this.storage.setItem('hasSmartDetection', 'true');
this.updateDevice();
}
const detected: ObjectsDetected = {
timestamp: Date.now(),
detections: [
{
score: 1,
className,
}
],
};
this.onDeviceEvent(ScryptedInterface.ObjectDetector, detected);
});
return events;
}
async getDetectionInput(detectionId: string, eventId?: any): Promise<MediaObject> {
return;
}
async getObjectTypes(): Promise<ObjectDetectionTypes> {
return {
classes: [
'person',
'face',
'car',
],
}
}
async getOtherSettings(): Promise<Setting[]> {
const ret = await super.getOtherSettings();
ret.push(
@@ -517,19 +472,13 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
if (isDoorbell || twoWayAudio) {
interfaces.push(ScryptedInterface.Intercom);
}
const enableDahuaLock = this.storage.getItem('enableDahuaLock') === 'true';
if (isDoorbell && doorbellType === DAHUA_DOORBELL_TYPE && enableDahuaLock) {
interfaces.push(ScryptedInterface.Lock);
}
const continuousRecording = this.storage.getItem('continuousRecording') === 'true';
if (continuousRecording)
interfaces.push(ScryptedInterface.VideoRecorder);
if (this.hasSmartDetection)
interfaces.push(ScryptedInterface.ObjectDetector);
this.provider.updateDevice(this.nativeId, this.name, interfaces, type);
}
@@ -572,7 +521,7 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
}
const doorbellType = this.storage.getItem('doorbellType');
// not sure if this all works, since i don't actually have a doorbell.
// good luck!
const channel = this.getRtspChannel() || '1';
@@ -599,22 +548,12 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
}
else {
args.push(
"-vn",
'-acodec', 'aac',
'-f', 'adts',
'pipe:3',
);
"-vn",
'-acodec', 'aac',
'-f', 'adts',
'pipe:3',
);
contentType = 'Audio/AAC';
// args.push(
// "-vn",
// '-acodec', 'pcm_mulaw',
// '-ac', '1',
// '-ar', '8000',
// '-sample_fmt', 's16',
// '-f', 'mulaw',
// 'pipe:3',
// );
// contentType = 'Audio/G.711A';
}
this.console.log('ffmpeg intercom', args);
@@ -634,19 +573,15 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
// seems the dahua doorbells preferred 1024 chunks. should investigate adts
// parsing and sending multipart chunks instead.
const passthrough = new PassThrough();
const abortController = new AbortController();
this.getClient().request({
url,
method: 'POST',
headers: {
'Content-Type': contentType,
'Content-Length': '9999999',
'Content-Length': '9999999'
},
signal: abortController.signal,
responseType: 'readable',
}, passthrough)
.catch(() => { })
.finally(() => this.console.log('request finished'))
}, passthrough);
try {
while (true) {
@@ -658,8 +593,7 @@ class AmcrestCamera extends RtspSmartCamera implements VideoCameraConfiguration,
}
finally {
this.console.log('audio finished');
passthrough.destroy();
abortController.abort();
passthrough.end();
}
this.stopIntercom();
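
The intercom changes above swap passthrough.end() for an AbortController plus passthrough.destroy(), so the POST request is actively torn down when audio finishes rather than left to linger. A sketch of that teardown pattern; postAudio is a hypothetical stand-in for the camera client's request helper:

// Hand the request an AbortSignal, swallow the abort error, and abort
// in `finally` once the audio source is exhausted.
async function streamAudio(postAudio: (signal: AbortSignal) => Promise<void>) {
    const abortController = new AbortController();
    const pending = postAudio(abortController.signal)
        .catch(() => { })
        .finally(() => console.log("request finished"));
    try {
        // ... write audio chunks to the request body here ...
    }
    finally {
        abortController.abort();
    }
    await pending;
}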

View File

@@ -29,14 +29,9 @@ export async function getDeviceInfo(credential: AuthFetchCredentialState, addres
vals[k] = v.trim();
}
const ret = {
return {
deviceType: vals.deviceType,
hardwareVersion: vals.hardwareVersion,
serialNumber: vals.serialNumber,
};
if (!ret.deviceType && !ret.hardwareVersion && !ret.serialNumber)
throw new Error('not amcrest');
return ret;
}
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/bticino",
"version": "0.0.15",
"version": "0.0.13",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/bticino",
"version": "0.0.15",
"version": "0.0.13",
"dependencies": {
"@slyoldfox/sip": "^0.0.6-1",
"sdp": "^3.0.3",
@@ -30,23 +30,23 @@
"dependencies": {
"@scrypted/sdk": "file:../sdk",
"@scrypted/server": "file:../server",
"http-auth-utils": "^5.0.1",
"typescript": "^5.3.3"
"http-auth-utils": "^3.0.2",
"node-fetch-commonjs": "^3.1.1",
"typescript": "^4.4.3"
},
"devDependencies": {
"@types/node": "^20.11.0",
"ts-node": "^10.9.2"
"@types/node": "^16.9.0"
}
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.3.14",
"version": "0.3.2",
"dev": true,
"license": "ISC",
"dependencies": {
"@babel/preset-typescript": "^7.18.6",
"adm-zip": "^0.4.13",
"axios": "^1.6.5",
"axios": "^0.21.4",
"babel-loader": "^9.1.0",
"babel-plugin-const-enum": "^1.1.0",
"esbuild": "^0.15.9",
@@ -1219,10 +1219,10 @@
"requires": {
"@scrypted/sdk": "file:../sdk",
"@scrypted/server": "file:../server",
"@types/node": "^20.11.0",
"http-auth-utils": "^5.0.1",
"ts-node": "^10.9.2",
"typescript": "^5.3.3"
"@types/node": "^16.9.0",
"http-auth-utils": "^3.0.2",
"node-fetch-commonjs": "^3.1.1",
"typescript": "^4.4.3"
}
},
"@scrypted/sdk": {
@@ -1232,7 +1232,7 @@
"@types/node": "^18.11.18",
"@types/stringify-object": "^4.0.0",
"adm-zip": "^0.4.13",
"axios": "^1.6.5",
"axios": "^0.21.4",
"babel-loader": "^9.1.0",
"babel-plugin-const-enum": "^1.1.0",
"esbuild": "^0.15.9",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/bticino",
"version": "0.0.15",
"version": "0.0.13",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",
"prescrypted-setup-project": "scrypted-package-json",

View File

@@ -17,12 +17,6 @@ import { ffmpegLogInitialOutput, safeKillFFmpeg, safePrintFFmpegArguments } from
import { PersistentSipManager } from './persistent-sip-manager';
import { InviteHandler } from './bticino-inviteHandler';
import { SipOptions, SipRequest } from '../../sip/src/sip-manager';
import fs from "fs"
import url from "url"
import path from 'path';
import { default as stream } from 'node:stream'
import type { ReadableStream } from 'node:stream/web'
import { finished } from "stream/promises";
import { get } from 'http'
import { ControllerApi } from './c300x-controller-api';
@@ -31,7 +25,6 @@ import { BticinoMuteSwitch } from './bticino-mute-switch';
const STREAM_TIMEOUT = 65000;
const { mediaManager } = sdk;
const BTICINO_CLIPS = path.join(process.env.SCRYPTED_PLUGIN_VOLUME, 'bticino-clips');
export class BticinoSipCamera extends ScryptedDeviceBase implements MotionSensor, DeviceProvider, Intercom, Camera, VideoCamera, Settings, BinarySensor, HttpRequestHandler, VideoClips, Reboot {
@@ -154,87 +147,11 @@ export class BticinoSipCamera extends ScryptedDeviceBase implements MotionSensor
});
}
async getVideoClip(videoId: string): Promise<MediaObject> {
const outputfile = await this.fetchAndConvertVoicemailMessage(videoId);
const fileURLToPath: string = url.pathToFileURL(outputfile).toString()
this.console.log(`Creating mediaObject for url: ${fileURLToPath}`)
return await mediaManager.createMediaObjectFromUrl(fileURLToPath);
}
private async fetchAndConvertVoicemailMessage(videoId: string) {
getVideoClip(videoId: string): Promise<MediaObject> {
let c300x = SipHelper.getIntercomIp(this)
const response = await fetch(`http://${c300x}:8080/voicemail?msg=${videoId}/aswm.avi&raw=true`);
const contentLength: number = Number(response.headers.get("Content-Length"));
const lastModified: Date = new Date(response.headers.get("Last-Modified-Time"));
const avifile = `${BTICINO_CLIPS}/${videoId}.avi`;
const outputfile = `${BTICINO_CLIPS}/${videoId}.mp4`;
if (!fs.existsSync(BTICINO_CLIPS)) {
this.console.log(`Creating clips dir at: ${BTICINO_CLIPS}`)
fs.mkdirSync(BTICINO_CLIPS);
}
if (fs.existsSync(avifile)) {
const stat = fs.statSync(avifile);
if (stat.size != contentLength || stat.mtime.getTime() != lastModified.getTime()) {
this.console.log(`Size ${stat.size} != ${contentLength} or time ${stat.mtime.getTime} != ${lastModified.getTime}`)
try {
fs.rmSync(avifile);
} catch (e) { }
try {
fs.rmSync(outputfile);
} catch (e) { }
} else {
this.console.log(`Keeping the cached video at ${avifile}`)
}
}
if (!fs.existsSync(avifile)) {
this.console.log("Starting download.")
await finished(stream.Readable.from(response.body as ReadableStream<Uint8Array>).pipe(fs.createWriteStream(avifile)));
this.console.log("Download finished.")
try {
this.console.log(`Setting mtime to ${lastModified}`)
fs.utimesSync(avifile, lastModified, lastModified);
} catch (e) { }
}
const ffmpegPath = await mediaManager.getFFmpegPath();
const ffmpegArgs = [
'-hide_banner',
'-nostats',
'-y',
'-i', avifile,
outputfile
];
safePrintFFmpegArguments(console, ffmpegArgs);
const cp = child_process.spawn(ffmpegPath, ffmpegArgs, {
stdio: ['pipe', 'pipe', 'pipe', 'pipe'],
});
const p = new Promise((resolveFunc) => {
cp.stdout.on("data", (x) => {
this.console.log(x.toString());
});
cp.stderr.on("data", (x) => {
this.console.error(x.toString());
});
cp.on("exit", (code) => {
resolveFunc(code);
});
});
let returnCode = await p;
this.console.log(`Converted file returned code: ${returnCode}`);
return outputfile;
const url = `http://${c300x}:8080/voicemail?msg=${videoId}/aswm.avi&raw=true`;
return mediaManager.createMediaObjectFromUrl(url);
}
getVideoClipThumbnail(thumbnailId: string): Promise<MediaObject> {
let c300x = SipHelper.getIntercomIp(this)
const url = `http://${c300x}:8080/voicemail?msg=${thumbnailId}/aswm.jpg&raw=true`;

View File

@@ -33,10 +33,8 @@ export class VoicemailHandler extends SipRequestHandler {
handle(request: SipRequest) {
const lastVoicemailMessageTimestamp : number = Number.parseInt( this.sipCamera.storage.getItem('lastVoicemailMessageTimestamp') ) || -1
const message : string = request.content.toString()
let matches : Array<RegExpMatchArray> = [...message.matchAll(/\*#8\*\*40\*([01])\*([01])\*/gm)]
if( matches && matches.length > 0 && matches[0].length > 0 ) {
this.sipCamera.console.debug( "Answering machine state: " + matches[0][1] + " / Welcome message state: " + matches[0][2] );
this.aswmIsEnabled = matches[0][1] == '1';
if( message.startsWith('*#8**40*0*0*') || message.startsWith('*#8**40*1*0*') ) {
this.aswmIsEnabled = message.startsWith('*#8**40*1*0*');
if( this.isEnabled() ) {
this.sipCamera.console.debug("Handling incoming answering machine reply")
const messages : string[] = message.split(';')
@@ -62,8 +60,6 @@ export class VoicemailHandler extends SipRequestHandler {
this.sipCamera.console.debug("No new messages since: " + lastVoicemailMessageTimestamp + " lastMessage: " + lastMessageTimestamp)
}
}
} else {
this.sipCamera.console.debug("Not handling message: " + message)
}
}
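
The change above collapses two hard-coded startsWith checks into one regex whose capture groups carry the answering machine state and the welcome message state, and logs any frame that does not match. A self-contained sketch of the same matchAll pattern; the message literal is invented for illustration:

// Capture groups: [1] = answering machine on/off, [2] = welcome message on/off.
const ASWM_STATE = /\*#8\*\*40\*([01])\*([01])\*/gm;

const message = "*#8**40*1*0*"; // hypothetical status frame
const matches = [...message.matchAll(ASWM_STATE)];
if (matches.length > 0 && matches[0].length > 0) {
    const aswmEnabled = matches[0][1] === "1";
    const welcomeEnabled = matches[0][2] === "1";
    console.log({ aswmEnabled, welcomeEnabled });
}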

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/chromecast",
"version": "0.1.58",
"version": "0.1.57",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/chromecast",
"version": "0.1.58",
"version": "0.1.57",
"license": "Apache-2.0",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/chromecast",
"version": "0.1.58",
"version": "0.1.57",
"description": "Send video, audio, and text to speech notifications to Chromecast and Google Home devices",
"author": "Scrypted",
"license": "Apache-2.0",

View File

@@ -183,7 +183,7 @@ class CastDevice extends ScryptedDeviceBase implements MediaPlayer, Refresh, Eng
media = await mediaManager.createMediaObjectFromUrl(media);
}
}
else if (options?.mimeType?.startsWith('image/') || options?.mimeType?.startsWith('audio/')) {
else if (options?.mimeType?.startsWith('image/')) {
url = await mediaManager.convertMediaObjectToInsecureLocalUrl(media, options?.mimeType);
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/core",
"version": "0.3.23",
"version": "0.3.18",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/core",
"version": "0.3.23",
"version": "0.3.18",
"license": "Apache-2.0",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/core",
"version": "0.3.23",
"version": "0.3.18",
"description": "Scrypted Core plugin. Provides the UI, websocket, and engine.io APIs.",
"author": "Scrypted",
"license": "Apache-2.0",

View File

@@ -18,7 +18,7 @@ const { systemManager, deviceManager, endpointManager } = sdk;
export function getAddresses() {
const addresses: string[] = [];
for (const [iface, nif] of Object.entries(os.networkInterfaces())) {
if (iface.startsWith('en') || iface.startsWith('eth') || iface.startsWith('wlan') || iface.startsWith('net')) {
if (iface.startsWith('en') || iface.startsWith('eth') || iface.startsWith('wlan')) {
addresses.push(iface);
addresses.push(...nif.map(addr => addr.address));
}

View File

@@ -11,7 +11,7 @@ export async function checkLxcDependencies() {
let needRestart = false;
if (!process.version.startsWith('v20.')) {
const cp = child_process.spawn('sh', ['-c', 'apt update -y && curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && apt install -y nodejs']);
const cp = child_process.spawn('sh', ['-c', 'curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && apt install -y nodejs']);
const [exitCode] = await once(cp, 'exit');
if (exitCode !== 0)
sdk.log.a('Failed to install Node.js 20.x.');

View File

@@ -1,9 +1,9 @@
{
"compilerOptions": {
"module": "Node16",
"moduleResolution": "Node16",
"target": "esnext",
"module": "commonjs",
"target": "ES2021",
"resolveJsonModule": true,
"moduleResolution": "Node16",
"esModuleInterop": true,
"sourceMap": true
},

View File

@@ -57,10 +57,7 @@ export default {
t += `<tspan x='${x}' dy='${toffset}em'>${Math.round(detection.score * 100) / 100}</tspan>`
toffset -= 1.2;
}
const tname = detection.className
+ (detection.id || detection.label ? ':' : '')
+ (detection.id ? ` ${detection.id}` : '')
+ (detection.label ? ` ${detection.label}` : '')
const tname = detection.className + (detection.id ? `: ${detection.id}` : '')
t += `<tspan x='${x}' dy='${toffset}em'>${tname}</tspan>`
const fs = 30 * svgScale;

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/coreml",
"version": "0.1.45",
"version": "0.1.29",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/coreml",
"version": "0.1.45",
"version": "0.1.29",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -34,7 +34,6 @@
"type": "API",
"interfaces": [
"Settings",
"DeviceProvider",
"ObjectDetection",
"ObjectDetectionPreview"
]
@@ -42,5 +41,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.45"
"version": "0.1.29"
}

View File

@@ -1 +0,0 @@
../../openvino/src/common

View File

@@ -1,42 +1,24 @@
from __future__ import annotations
import ast
import asyncio
import concurrent.futures
import os
import re
from typing import Any, List, Tuple
from typing import Any, Tuple
import coremltools as ct
import scrypted_sdk
from PIL import Image
from scrypted_sdk import Setting, SettingValue
from common import yolo
from coreml.recognition import CoreMLRecognition
from predict import Prediction, PredictPlugin
from predict.rectangle import Rectangle
import yolo
from predict import Prediction, PredictPlugin, Rectangle
predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "CoreML-Predict")
availableModels = [
"Default",
"scrypted_yolov9c_320",
"scrypted_yolov9c",
"scrypted_yolov6n_320",
"scrypted_yolov6n",
"scrypted_yolov6s_320",
"scrypted_yolov6s",
"scrypted_yolov8n_320",
"scrypted_yolov8n",
"ssdlite_mobilenet_v2",
"yolov4-tiny",
]
predictExecutor = concurrent.futures.ThreadPoolExecutor(8, "CoreML-Predict")
def parse_label_contents(contents: str):
lines = contents.split(",")
lines = [line for line in lines if line.strip()]
lines = contents.splitlines()
ret = {}
for row_number, content in enumerate(lines):
pair = re.split(r"[:\s]+", content.strip(), maxsplit=1)
@@ -47,49 +29,40 @@ def parse_label_contents(contents: str):
return ret
def parse_labels(userDefined):
yolo = userDefined.get("names") or userDefined.get("yolo.names")
if yolo:
j = ast.literal_eval(yolo)
ret = {}
for k, v in j.items():
ret[int(k)] = v
return ret
classes = userDefined.get("classes")
if not classes:
raise Exception("no classes found in model metadata")
return parse_label_contents(classes)
class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProvider):
class CoreMLPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
model = self.storage.getItem("model") or "Default"
if model == "Default" or model not in availableModels:
if model != "Default":
self.storage.setItem("model", "Default")
model = "scrypted_yolov9c_320"
if model == "Default":
model = "yolov8n_320"
self.yolo = "yolo" in model
self.scrypted_yolo = "scrypted_yolo" in model
self.scrypted_model = "scrypted" in model
model_version = "v7"
mlmodel = "model" if self.scrypted_yolo else model
self.yolov8 = "yolov8" in model
self.yolov9 = "yolov9" in model
model_version = "v2"
print(f"model: {model}")
if not self.yolo:
# todo convert these to mlpackage
labelsFile = self.downloadFile(
f"https://github.com/koush/coreml-models/raw/main/{model}/coco_labels.txt",
"coco_labels.txt",
)
modelFile = self.downloadFile(
f"https://github.com/koush/coreml-models/raw/main/{model}/{mlmodel}.mlmodel",
f"https://github.com/koush/coreml-models/raw/main/{model}/{model}.mlmodel",
f"{model}.mlmodel",
)
else:
if self.scrypted_yolo:
if self.yolov8:
modelFile = self.downloadFile(
f"https://github.com/koush/coreml-models/raw/main/{model}/{model}.mlmodel",
f"{model}.mlmodel",
)
elif self.yolov9:
files = [
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/weights/weight.bin",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/{mlmodel}.mlmodel",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/{model}.mlmodel",
f"{model}/{model}.mlpackage/Manifest.json",
]
@@ -104,7 +77,7 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProv
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/FeatureDescriptions.json",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/Metadata.json",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/weights/weight.bin",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/{mlmodel}.mlmodel",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/{model}.mlmodel",
f"{model}/{model}.mlpackage/Manifest.json",
]
@@ -115,42 +88,25 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProv
)
modelFile = os.path.dirname(p)
labelsFile = self.downloadFile(
f"https://github.com/koush/coreml-models/raw/main/{model}/coco_80cl.txt",
f"{model_version}/{model}/coco_80cl.txt",
)
self.model = ct.models.MLModel(modelFile)
self.modelspec = self.model.get_spec()
self.inputdesc = self.modelspec.description.input[0]
self.inputheight = self.inputdesc.type.imageType.height
self.inputwidth = self.inputdesc.type.imageType.width
self.input_name = self.model.get_spec().description.input[0].name
self.labels = parse_labels(self.modelspec.description.metadata.userDefined)
labels_contents = open(labelsFile, "r").read()
self.labels = parse_label_contents(labels_contents)
# csv in mobilenet model
# self.modelspec.description.metadata.userDefined['classes']
self.loop = asyncio.get_event_loop()
self.minThreshold = 0.2
asyncio.ensure_future(self.prepareRecognitionModels(), loop=self.loop)
async def prepareRecognitionModels(self):
try:
await scrypted_sdk.deviceManager.onDevicesChanged(
{
"devices": [
{
"nativeId": "recognition",
"type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
"interfaces": [
scrypted_sdk.ScryptedInterface.ObjectDetection.value,
],
"name": "CoreML Recognition",
}
]
}
)
except:
pass
async def getDevice(self, nativeId: str) -> Any:
return CoreMLRecognition(nativeId)
async def getSettings(self) -> list[Setting]:
model = self.storage.getItem("model") or "Default"
return [
@@ -158,7 +114,14 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProv
"key": "model",
"title": "Model",
"description": "The detection model used to find objects.",
"choices": availableModels,
"choices": [
"Default",
"ssdlite_mobilenet_v2",
"yolov4-tiny",
"yolov8n",
"yolov8n_320",
"yolov9c_320",
],
"value": model,
},
]
@@ -174,23 +137,23 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProv
def get_input_size(self) -> Tuple[float, float]:
return (self.inputwidth, self.inputheight)
async def detect_batch(self, inputs: List[Any]) -> List[Any]:
out_dicts = await asyncio.get_event_loop().run_in_executor(
predictExecutor, lambda: self.model.predict(inputs)
)
return out_dicts
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
objs = []
# run in executor if this is the plugin loop
if self.yolo:
out_dict = await self.queue_batch({self.input_name: input})
input_name = "image" if self.yolov8 or self.yolov9 else "input_1"
if asyncio.get_event_loop() is self.loop:
out_dict = await asyncio.get_event_loop().run_in_executor(
predictExecutor, lambda: self.model.predict({input_name: input})
)
else:
out_dict = self.model.predict({input_name: input})
if self.scrypted_yolo:
if self.yolov8 or self.yolov9:
results = list(out_dict.values())[0][0]
objs = yolo.parse_yolov9(results)
objs = yolo.parse_yolov8(results)
ret = self.create_detection_result(objs, src_size, cvss)
return ret
@@ -224,12 +187,17 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.Settings, scrypted_sdk.DeviceProv
ret = self.create_detection_result(objs, src_size, cvss)
return ret
out_dict = await asyncio.get_event_loop().run_in_executor(
predictExecutor,
lambda: self.model.predict(
if asyncio.get_event_loop() is self.loop:
out_dict = await asyncio.get_event_loop().run_in_executor(
predictExecutor,
lambda: self.model.predict(
{"image": input, "confidenceThreshold": self.minThreshold}
),
)
else:
out_dict = self.model.predict(
{"image": input, "confidenceThreshold": self.minThreshold}
),
)
)
coordinatesList = out_dict["coordinates"].astype(float)

View File

@@ -1,132 +0,0 @@
from __future__ import annotations
import concurrent.futures
import os
import coremltools as ct
import numpy as np
# import Quartz
# from Foundation import NSData, NSMakeSize
# import Vision
from predict.recognize import RecognizeDetection
def euclidean_distance(arr1, arr2):
return np.linalg.norm(arr1 - arr2)
def cosine_similarity(vector_a, vector_b):
dot_product = np.dot(vector_a, vector_b)
norm_a = np.linalg.norm(vector_a)
norm_b = np.linalg.norm(vector_b)
similarity = dot_product / (norm_a * norm_b)
return similarity
predictExecutor = concurrent.futures.ThreadPoolExecutor(8, "Vision-Predict")
class CoreMLRecognition(RecognizeDetection):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
def downloadModel(self, model: str):
model_version = "v7"
mlmodel = "model"
files = [
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/weights/weight.bin",
f"{model}/{model}.mlpackage/Data/com.apple.CoreML/{mlmodel}.mlmodel",
f"{model}/{model}.mlpackage/Manifest.json",
]
for f in files:
p = self.downloadFile(
f"https://github.com/koush/coreml-models/raw/main/{f}",
f"{model_version}/{f}",
)
modelFile = os.path.dirname(p)
model = ct.models.MLModel(modelFile)
inputName = model.get_spec().description.input[0].name
return model, inputName
def predictDetectModel(self, input):
model, inputName = self.detectModel
out_dict = model.predict({inputName: input})
results = list(out_dict.values())[0][0]
return results
def predictFaceModel(self, input):
model, inputName = self.faceModel
out_dict = model.predict({inputName: input})
return out_dict["var_2167"][0]
def predictTextModel(self, input):
model, inputName = self.textModel
out_dict = model.predict({inputName: input})
preds = out_dict["linear_2"]
return preds
# def predictVision(self, input: Image.Image) -> asyncio.Future[list[Prediction]]:
# buffer = input.tobytes()
# myData = NSData.alloc().initWithBytes_length_(buffer, len(buffer))
# input_image = (
# Quartz.CIImage.imageWithBitmapData_bytesPerRow_size_format_options_(
# myData,
# 4 * input.width,
# NSMakeSize(input.width, input.height),
# Quartz.kCIFormatRGBA8,
# None,
# )
# )
# request_handler = Vision.VNImageRequestHandler.alloc().initWithCIImage_options_(
# input_image, None
# )
# loop = self.loop
# future = loop.create_future()
# def detect_face_handler(request, error):
# observations = request.results()
# if error:
# loop.call_soon_threadsafe(future.set_exception, Exception())
# else:
# objs = []
# for o in observations:
# confidence = o.confidence()
# bb = o.boundingBox()
# origin = bb.origin
# size = bb.size
# l = origin.x * input.width
# t = (1 - origin.y - size.height) * input.height
# w = size.width * input.width
# h = size.height * input.height
# prediction = Prediction(
# 0, confidence, from_bounding_box((l, t, w, h))
# )
# objs.append(prediction)
# loop.call_soon_threadsafe(future.set_result, objs)
# request = (
# Vision.VNDetectFaceRectanglesRequest.alloc().initWithCompletionHandler_(
# detect_face_handler
# )
# )
# error = request_handler.performRequests_error_([request], None)
# return future
# async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
# future = await asyncio.get_event_loop().run_in_executor(
# predictExecutor,
# lambda: self.predictVision(input),
# )
# objs = await future
# ret = self.create_detection_result(objs, src_size, cvss)
# return ret

View File

@@ -1,2 +1,6 @@
#
coremltools==7.1
Pillow>=5.4.1
# pillow for anything not intel linux, pillow-simd is available on x64 linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'

plugins/coreml/src/yolo Symbolic link
View File

@@ -0,0 +1 @@
../../openvino/src/yolo

View File

@@ -1,23 +1,22 @@
{
"name": "@scrypted/hikvision",
"version": "0.0.147",
"version": "0.0.137",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/hikvision",
"version": "0.0.147",
"version": "0.0.137",
"license": "Apache",
"dependencies": {
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk",
"@types/xml2js": "^0.4.14",
"content-type": "^1.0.5",
"xml2js": "^0.6.2"
"@types/xml2js": "^0.4.11",
"lodash": "^4.17.21",
"xml2js": "^0.6.0"
},
"devDependencies": {
"@types/content-type": "^1.1.8",
"@types/node": "^20.11.30"
"@types/node": "^18.15.11"
}
},
"../../common": {
@@ -28,16 +27,17 @@
"@scrypted/sdk": "file:../sdk",
"@scrypted/server": "file:../server",
"http-auth-utils": "^5.0.1",
"node-fetch-commonjs": "^3.1.1",
"typescript": "^5.3.3"
},
"devDependencies": {
"@types/node": "^20.11.0",
"@types/node": "^20.10.8",
"ts-node": "^10.9.2"
}
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.3.29",
"version": "0.3.4",
"license": "ISC",
"dependencies": {
"@babel/preset-typescript": "^7.18.6",
@@ -83,50 +83,33 @@
"resolved": "../../sdk",
"link": true
},
"node_modules/@types/content-type": {
"version": "1.1.8",
"resolved": "https://registry.npmjs.org/@types/content-type/-/content-type-1.1.8.tgz",
"integrity": "sha512-1tBhmVUeso3+ahfyaKluXe38p+94lovUZdoVfQ3OnJo9uJC42JT7CBoN3k9HYhAae+GwiBYmHu+N9FZhOG+2Pg==",
"dev": true
},
"node_modules/@types/node": {
"version": "20.11.30",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
"integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
"dependencies": {
"undici-types": "~5.26.4"
}
"version": "18.15.11",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.11.tgz",
"integrity": "sha512-E5Kwq2n4SbMzQOn6wnmBjuK9ouqlURrcZDVfbo9ftDDTFt3nk7ZKK4GMOzoYgnpQJKcxwQw+lGaBvvlMo0qN/Q=="
},
"node_modules/@types/xml2js": {
"version": "0.4.14",
"resolved": "https://registry.npmjs.org/@types/xml2js/-/xml2js-0.4.14.tgz",
"integrity": "sha512-4YnrRemBShWRO2QjvUin8ESA41rH+9nQGLUGZV/1IDhi3SL9OhdpNC/MrulTWuptXKwhx/aDxE7toV0f/ypIXQ==",
"version": "0.4.11",
"resolved": "https://registry.npmjs.org/@types/xml2js/-/xml2js-0.4.11.tgz",
"integrity": "sha512-JdigeAKmCyoJUiQljjr7tQG3if9NkqGUgwEUqBvV0N7LM4HyQk7UXCnusRa1lnvXAEYJ8mw8GtZWioagNztOwA==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
"engines": {
"node": ">= 0.6"
}
"node_modules/lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
},
"node_modules/sax": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
"integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw=="
},
"node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="
},
"node_modules/xml2js": {
"version": "0.6.2",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.2.tgz",
"integrity": "sha512-T4rieHaC1EXcES0Kxxj4JWgaUQHDk+qwHcYOCFHfiwKz7tOVPLq7Hjq9dM1WCMhylqMEfP7hMcOIChvotiZegA==",
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.0.tgz",
"integrity": "sha512-eLTh0kA8uHceqesPqSE+VvO1CDDJWMwlQfB6LuN6T8w6MaDJ8Txm8P7s5cHD0miF0V+GGTZrDQfxPZQVsur33w==",
"dependencies": {
"sax": ">=0.6.0",
"xmlbuilder": "~11.0.0"
@@ -150,8 +133,9 @@
"requires": {
"@scrypted/sdk": "file:../sdk",
"@scrypted/server": "file:../server",
"@types/node": "^20.11.0",
"@types/node": "^20.10.8",
"http-auth-utils": "^5.0.1",
"node-fetch-commonjs": "^3.1.1",
"ts-node": "^10.9.2",
"typescript": "^5.3.3"
}
@@ -180,47 +164,33 @@
"webpack-bundle-analyzer": "^4.5.0"
}
},
"@types/content-type": {
"version": "1.1.8",
"resolved": "https://registry.npmjs.org/@types/content-type/-/content-type-1.1.8.tgz",
"integrity": "sha512-1tBhmVUeso3+ahfyaKluXe38p+94lovUZdoVfQ3OnJo9uJC42JT7CBoN3k9HYhAae+GwiBYmHu+N9FZhOG+2Pg==",
"dev": true
},
"@types/node": {
"version": "20.11.30",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
"integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
"requires": {
"undici-types": "~5.26.4"
}
"version": "18.15.11",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.11.tgz",
"integrity": "sha512-E5Kwq2n4SbMzQOn6wnmBjuK9ouqlURrcZDVfbo9ftDDTFt3nk7ZKK4GMOzoYgnpQJKcxwQw+lGaBvvlMo0qN/Q=="
},
"@types/xml2js": {
"version": "0.4.14",
"resolved": "https://registry.npmjs.org/@types/xml2js/-/xml2js-0.4.14.tgz",
"integrity": "sha512-4YnrRemBShWRO2QjvUin8ESA41rH+9nQGLUGZV/1IDhi3SL9OhdpNC/MrulTWuptXKwhx/aDxE7toV0f/ypIXQ==",
"version": "0.4.11",
"resolved": "https://registry.npmjs.org/@types/xml2js/-/xml2js-0.4.11.tgz",
"integrity": "sha512-JdigeAKmCyoJUiQljjr7tQG3if9NkqGUgwEUqBvV0N7LM4HyQk7UXCnusRa1lnvXAEYJ8mw8GtZWioagNztOwA==",
"requires": {
"@types/node": "*"
}
},
"content-type": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
"integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA=="
"lodash": {
"version": "4.17.21",
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
"integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
},
"sax": {
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/sax/-/sax-1.2.4.tgz",
"integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw=="
},
"undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="
},
"xml2js": {
"version": "0.6.2",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.2.tgz",
"integrity": "sha512-T4rieHaC1EXcES0Kxxj4JWgaUQHDk+qwHcYOCFHfiwKz7tOVPLq7Hjq9dM1WCMhylqMEfP7hMcOIChvotiZegA==",
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.6.0.tgz",
"integrity": "sha512-eLTh0kA8uHceqesPqSE+VvO1CDDJWMwlQfB6LuN6T8w6MaDJ8Txm8P7s5cHD0miF0V+GGTZrDQfxPZQVsur33w==",
"requires": {
"sax": ">=0.6.0",
"xmlbuilder": "~11.0.0"

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/hikvision",
"version": "0.0.147",
"version": "0.0.137",
"description": "Hikvision Plugin for Scrypted",
"author": "Scrypted",
"license": "Apache",
@@ -37,12 +37,11 @@
"dependencies": {
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk",
"@types/xml2js": "^0.4.14",
"content-type": "^1.0.5",
"xml2js": "^0.6.2"
"@types/xml2js": "^0.4.11",
"lodash": "^4.17.21",
"xml2js": "^0.6.0"
},
"devDependencies": {
"@types/content-type": "^1.1.8",
"@types/node": "^20.11.30"
"@types/node": "^18.15.11"
}
}

View File

@@ -1,17 +1,8 @@
import { AuthFetchCredentialState, HttpFetchOptions, authHttpFetch } from '@scrypted/common/src/http-auth-fetch';
import { readLine } from '@scrypted/common/src/read-stream';
import { parseHeaders, readBody, readMessage } from '@scrypted/common/src/rtsp-server';
import contentType from 'content-type';
import { IncomingMessage } from 'http';
import { EventEmitter, Readable } from 'stream';
import { Destroyable } from '../../rtsp/src/rtsp';
import { Readable } from 'stream';
import { getDeviceInfo } from './probe';
export const detectionMap = {
human: 'person',
vehicle: 'car',
}
export function getChannel(channel: string) {
return channel || '101';
}
@@ -24,8 +15,6 @@ export enum HikvisionCameraEvent {
// <eventType>linedetection</eventType>
// <eventState>inactive</eventState>
LineDetection = "<eventType>linedetection</eventType>",
RegionEntrance = "<eventType>regionEntrance</eventType>",
RegionExit = "<eventType>regionExit</eventType>",
// <eventType>fielddetection</eventType>
// <eventState>active</eventState>
// <eventType>fielddetection</eventType>
@@ -42,7 +31,7 @@ export interface HikvisionCameraStreamSetup {
export class HikvisionCameraAPI {
credential: AuthFetchCredentialState;
deviceModel: Promise<string>;
listenerPromise: Promise<Destroyable>;
listenerPromise: Promise<IncomingMessage>;
constructor(public ip: string, username: string, password: string, public console: Console) {
this.credential = {
@@ -140,106 +129,35 @@ export class HikvisionCameraAPI {
return response.body;
}
async listenEvents(): Promise<Destroyable> {
const events = new EventEmitter();
(events as any).destroy = () => { };
async listenEvents() {
// support multiple cameras listening to a single single stream
if (!this.listenerPromise) {
const url = `http://${this.ip}/ISAPI/Event/notification/alertStream`;
let lastSmartDetection: string;
this.listenerPromise = this.request({
url,
responseType: 'readable',
}).then(response => {
const stream: IncomingMessage = response.body;
(events as any).destroy = () => {
stream.destroy();
events.removeAllListeners();
};
stream.on('close', () => {
this.listenerPromise = undefined;
events.emit('close');
});
stream.on('end', () => {
this.listenerPromise = undefined;
events.emit('end');
});
stream.on('error', e => {
this.listenerPromise = undefined;
events.emit('error', e);
});
const stream = response.body;
stream.socket.setKeepAlive(true);
const ct = stream.headers['content-type'];
// make content type parsable as content disposition filename
const cd = contentType.parse(ct);
let { boundary } = cd.parameters;
boundary = `--${boundary}`;
const boundaryEnd = `${boundary}--`;
(async () => {
while (true) {
let ignore = await readLine(stream);
ignore = ignore.trim();
if (!ignore)
continue;
if (ignore === boundaryEnd)
continue;
if (ignore !== boundary) {
this.console.error('expected boundary but found', ignore);
throw new Error('expected boundary');
}
const message = await readMessage(stream);
events.emit('data', message);
message.unshift('');
const headers = parseHeaders(message);
const body = await readBody(stream, headers);
try {
if (!headers['content-type'].includes('application/xml') && lastSmartDetection) {
if (!headers['content-type']?.startsWith('image/jpeg')) {
continue;
}
events.emit('smart', lastSmartDetection, body);
lastSmartDetection = undefined;
continue;
}
}
finally {
// is it possible that smart detections are sent without images?
// if so, flush this detection.
if (lastSmartDetection) {
events.emit('smart', lastSmartDetection);
}
}
const data = body.toString();
events.emit('data', data);
for (const event of Object.values(HikvisionCameraEvent)) {
if (data.indexOf(event) !== -1) {
const cameraNumber = data.match(/<channelID>(.*?)</)?.[1] || data.match(/<dynChannelID>(.*?)</)?.[1];
const inactive = data.indexOf('<eventState>inactive</eventState>') !== -1;
events.emit('event', event, cameraNumber, inactive, data);
if (event === HikvisionCameraEvent.LineDetection
|| event === HikvisionCameraEvent.RegionEntrance
|| event === HikvisionCameraEvent.RegionExit
|| event === HikvisionCameraEvent.FieldDetection) {
lastSmartDetection = data;
}
}
stream.on('data', (buffer: Buffer) => {
const data = buffer.toString();
for (const event of Object.values(HikvisionCameraEvent)) {
if (data.indexOf(event) !== -1) {
const cameraNumber = data.match(/<channelID>(.*?)</)?.[1] || data.match(/<dynChannelID>(.*?)</)?.[1];
const inactive = data.indexOf('<eventState>inactive</eventState>') !== -1;
stream.emit('event', event, cameraNumber, inactive, data);
}
}
})()
.catch(() => stream.destroy());
return events as any as Destroyable;
});
return stream;
});
this.listenerPromise.catch(() => this.listenerPromise = undefined);
this.listenerPromise.then(stream => {
stream.on('close', () => this.listenerPromise = undefined);
stream.on('end', () => this.listenerPromise = undefined);
});
}
return this.listenerPromise;
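
Note on the rewritten listener above: the ISAPI alertStream is a multipart/mixed HTTP response, so the new code frames it by boundary instead of scanning raw data chunks. A minimal sketch of that framing, assuming readLine/readBody helpers equivalent to the @scrypted/common imports above (readParts is an illustrative name):

import { Readable } from 'stream';

// Each part is: boundary line, part headers, blank line, Content-Length body.
async function* readParts(stream: Readable, boundary: string) {
    while (true) {
        // skip blank separator lines and the terminating "<boundary>--"
        let line = (await readLine(stream)).trim();
        if (!line || line === `${boundary}--`)
            continue;
        if (line !== boundary)
            throw new Error('expected multipart boundary');
        const headers: { [key: string]: string } = {};
        while ((line = (await readLine(stream)).trim())) {
            const [key, ...value] = line.split(':');
            headers[key.toLowerCase()] = value.join(':').trim();
        }
        const body = await readBody(stream, headers);
        yield { headers, body };
    }
}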

View File

@@ -1,12 +1,11 @@
import sdk, { Camera, DeviceCreatorSettings, DeviceInformation, FFmpegInput, Intercom, MediaObject, MediaStreamOptions, ObjectDetectionResult, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, Reboot, RequestPictureOptions, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting } from "@scrypted/sdk";
import crypto from 'crypto';
import sdk, { Camera, DeviceCreatorSettings, DeviceInformation, FFmpegInput, Intercom, MediaObject, MediaStreamOptions, Reboot, RequestPictureOptions, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, Setting } from "@scrypted/sdk";
import { PassThrough } from "stream";
import xml2js from 'xml2js';
import { RtpPacket } from '../../../external/werift/packages/rtp/src/rtp/rtp';
import { OnvifIntercom } from "../../onvif/src/onvif-intercom";
import { RtspProvider, RtspSmartCamera, UrlMediaStreamOptions } from "../../rtsp/src/rtsp";
import { startRtpForwarderProcess } from '../../webrtc/src/rtp-forwarders';
import { HikvisionCameraAPI, HikvisionCameraEvent, detectionMap } from "./hikvision-camera-api";
import { HikvisionCameraAPI, HikvisionCameraEvent } from "./hikvision-camera-api";
const { mediaManager } = sdk;
@@ -16,17 +15,15 @@ function channelToCameraNumber(channel: string) {
return channel.substring(0, channel.length - 2);
}
class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboot, ObjectDetector {
class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboot {
detectedChannels: Promise<Map<string, MediaStreamOptions>>;
client: HikvisionCameraAPI;
onvifIntercom = new OnvifIntercom(this);
activeIntercom: Awaited<ReturnType<typeof startRtpForwarderProcess>>;
hasSmartDetection: boolean;
constructor(nativeId: string, provider: RtspProvider) {
super(nativeId, provider);
this.hasSmartDetection = this.storage.getItem('hasSmartDetection') === 'true';
this.updateDevice();
this.updateDeviceInfo();
}
@@ -66,52 +63,41 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
let ignoreCameraNumber: boolean;
const motionTimeoutDuration = 20000;
// check if the camera+channel field is in use, and filter events.
const checkCameraNumber = async (cameraNumber: string) => {
// check if the camera+channel field is in use, and filter events.
if (this.getRtspChannel()) {
// it is possible to set it up to use a camera number
// on an nvr IP (which gives RTSP urls through the NVR), but then use a http port
// that gives a filtered event stream from only that camera.
// in this case, the camera numbers will not
// match as they will always be "1".
// to detect that a camera specific endpoint is being used
// can look at the channel ids, and see if that camera number is found.
// this is different from the use case where the NVR or camera
// is using a port other than 80 (the default).
// could add a setting to have the user explicitly denote nvr usage
// but that is error prone.
const userCameraNumber = this.getCameraNumber();
if (ignoreCameraNumber === undefined && this.detectedChannels) {
const channelIds = (await this.detectedChannels).keys();
ignoreCameraNumber = true;
for (const id of channelIds) {
if (channelToCameraNumber(id) === userCameraNumber) {
ignoreCameraNumber = false;
break;
}
}
}
if (!ignoreCameraNumber && cameraNumber !== userCameraNumber) {
// this.console.error(`### Skipping motion event ${cameraNumber} != ${this.getCameraNumber()}`);
return false;
}
}
return true;
};
events.on('event', async (event: HikvisionCameraEvent, cameraNumber: string, inactive: boolean) => {
if (event === HikvisionCameraEvent.MotionDetected
|| event === HikvisionCameraEvent.LineDetection
|| event === HikvisionCameraEvent.RegionEntrance
|| event === HikvisionCameraEvent.RegionExit
|| event === HikvisionCameraEvent.FieldDetection) {
if (!await checkCameraNumber(cameraNumber))
return;
// check if the camera+channel field is in use, and filter events.
if (this.getRtspChannel()) {
// it is possible to set it up to use a camera number
// on an nvr IP (which gives RTSP urls through the NVR), but then use a http port
// that gives a filtered event stream from only that camera.
// in this case, the camera numbers will not
// match as they will always be "1".
// to detect that a camera specific endpoint is being used
// can look at the channel ids, and see if that camera number is found.
// this is different from the use case where the NVR or camera
// is using a port other than 80 (the default).
// could add a setting to have the user explicitly denote nvr usage
// but that is error prone.
const userCameraNumber = this.getCameraNumber();
if (ignoreCameraNumber === undefined && this.detectedChannels) {
const channelIds = (await this.detectedChannels).keys();
ignoreCameraNumber = true;
for (const id of channelIds) {
if (channelToCameraNumber(id) === userCameraNumber) {
ignoreCameraNumber = false;
break;
}
}
}
if (!ignoreCameraNumber && cameraNumber !== userCameraNumber) {
// this.console.error(`### Skipping motion event ${cameraNumber} != ${this.getCameraNumber()}`);
return;
}
}
this.motionDetected = true;
clearTimeout(motionTimeout);
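
For reference, the channel arithmetic behind this filter: Hikvision channel ids append a two-digit stream number to the camera number, so channelToCameraNumber simply strips the last two digits. Illustrative values:

channelToCameraNumber('101'); // '1' (camera 1, main stream)
channelToCameraNumber('102'); // '1' (camera 1, substream)
channelToCameraNumber('201'); // '2' (camera 2 on an NVR)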
@@ -120,107 +106,11 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
this.motionDetected = false;
}, motionTimeoutDuration);
}
});
let inputDimensions: [number, number];
events.on('smart', async (data: string, image: Buffer) => {
if (!this.hasSmartDetection) {
this.hasSmartDetection = true;
this.storage.setItem('hasSmartDetection', 'true');
this.updateDevice();
}
const xml = await xml2js.parseStringPromise(data);
const [channelId] = xml.EventNotificationAlert.channelID;
if (!await checkCameraNumber(channelId)) {
this.console.warn('channel check failed')
return;
}
const now = Date.now();
let detections: ObjectDetectionResult[] = xml.EventNotificationAlert?.DetectionRegionList?.map(region => {
const { DetectionRegionEntry } = region;
if (!DetectionRegionEntry)
return;
const dre = DetectionRegionEntry[0];
const { detectionTarget } = dre;
// const { TargetRect } = dre;
// const { X, Y, width, height } = TargetRect[0];
const [name] = detectionTarget;
return {
score: 1,
className: detectionMap[name] || name,
// boundingBox: [
// parseInt(X),
// parseInt(Y),
// parseInt(width),
// parseInt(height),
// ],
// movement: {
// moving: true,
// firstSeen: now,
// lastSeen: now,
// }
} as ObjectDetectionResult;
});
detections = detections?.filter(d => d);
if (!detections?.length)
return;
// if (inputDimensions === undefined && loadSharp()) {
// try {
// const { image: i, metadata } = await loadVipsMetadata(image);
// i.destroy();
// inputDimensions = [metadata.width, metadata.height];
// }
// catch (e) {
// inputDimensions = null;
// }
// finally {
// }
// }
let detectionId: string;
if (image) {
detectionId = crypto.randomBytes(4).toString('hex');
this.recentDetections.set(detectionId, image);
setTimeout(() => this.recentDetections.delete(detectionId), 10000);
}
const detected: ObjectsDetected = {
inputDimensions,
detectionId,
timestamp: now,
detections,
};
this.onDeviceEvent(ScryptedInterface.ObjectDetector, detected);
});
})
return events;
}
recentDetections = new Map<string, Buffer>();
async getDetectionInput(detectionId: string, eventId?: any): Promise<MediaObject> {
const image = this.recentDetections.get(detectionId);
if (!image)
return;
return mediaManager.createMediaObject(image, 'image/jpeg');
}
async getObjectTypes(): Promise<ObjectDetectionTypes> {
return {
classes: [
...Object.values(detectionMap),
]
}
}
createClient() {
return new HikvisionCameraAPI(this.getHttpAddress(), this.getUsername(), this.getPassword(), this.console);
}
@@ -394,9 +284,6 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
interfaces.push(ScryptedInterface.Intercom);
}
if (this.hasSmartDetection)
interfaces.push(ScryptedInterface.ObjectDetector);
this.provider.updateDevice(this.nativeId, this.name, interfaces, type);
}
@@ -521,7 +408,7 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
const put = this.getClient().request({
url,
method: 'PUT',
responseType: 'text',
responseType: 'readable',
headers: {
'Content-Type': 'application/octet-stream',
// 'Connection': 'close',
@@ -553,12 +440,6 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
forwarder.killPromise.finally(() => {
this.console.log('audio finished');
passthrough.end();
setTimeout(() => {
this.stopIntercom();
}, 1000);
});
put.finally(() => {
this.stopIntercom();
});
@@ -567,7 +448,7 @@ class HikvisionCamera extends RtspSmartCamera implements Camera, Intercom, Reboo
if (response.statusCode !== 200)
forwarder.kill();
})
.catch(() => forwarder.kill());
.catch(() => forwarder.kill());
}
async stopIntercom(): Promise<void> {
@@ -700,4 +581,4 @@ class HikvisionProvider extends RtspProvider {
}
}
export default HikvisionProvider;
export default new HikvisionProvider();
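
For context on the ObjectDetector support removed in this file: the camera delivers an EventNotificationAlert XML document whose DetectionRegionList entries carry a detectionTarget, which detectionMap translates to Scrypted class names. A hedged sketch of that parse, with an illustrative payload:

import xml2js from 'xml2js';

const sample = `
<EventNotificationAlert>
  <channelID>1</channelID>
  <eventType>linedetection</eventType>
  <DetectionRegionList>
    <DetectionRegionEntry>
      <detectionTarget>human</detectionTarget>
    </DetectionRegionEntry>
  </DetectionRegionList>
</EventNotificationAlert>`;

async function example() {
    const xml = await xml2js.parseStringPromise(sample);
    const [channelId] = xml.EventNotificationAlert.channelID; // '1'
    const [entry] = xml.EventNotificationAlert.DetectionRegionList[0].DetectionRegionEntry;
    const [target] = entry.detectionTarget; // 'human'
    // detectionMap[target] === 'person', the className reported to Scrypted
}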

View File

@@ -1,4 +1,4 @@
{
"scrypted.debugHost": "127.0.0.1"
"scrypted.debugHost": "scrypted-server"
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/homekit",
"version": "1.2.54",
"version": "1.2.43",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/homekit",
"version": "1.2.54",
"version": "1.2.43",
"dependencies": {
"@koush/werift-src": "file:../../external/werift",
"check-disk-space": "^3.4.0",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/homekit",
"version": "1.2.54",
"version": "1.2.43",
"description": "HomeKit Plugin for Scrypted",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",

View File

@@ -66,7 +66,7 @@ export function createHAPUsername() {
}
export function getAddresses() {
const addresses = Object.entries(os.networkInterfaces()).filter(([iface]) => iface.startsWith('en') || iface.startsWith('eth') || iface.startsWith('wlan') || iface.startsWith('net')).map(([_, addr]) => addr).flat().map(info => info.address).filter(address => address);
const addresses = Object.entries(os.networkInterfaces()).filter(([iface]) => iface.startsWith('en') || iface.startsWith('eth') || iface.startsWith('wlan')).map(([_, addr]) => addr).flat().map(info => info.address).filter(address => address);
return addresses;
}
@@ -74,17 +74,13 @@ export function getRandomPort() {
return Math.round(30000 + Math.random() * 20000);
}
export function createHAPUsernameStorageSettingsDict(device: { storage: Storage, name?: string }, group: string, subgroup?: string): StorageSettingsDict<'mac' | 'addIdentifyingMaterial' | 'qrCode' | 'pincode' | 'portOverride' | 'resetAccessory'> {
export function createHAPUsernameStorageSettingsDict(device: { storage: Storage, name?: string }, group: string, subgroup?: string): StorageSettingsDict<'mac' | 'qrCode' | 'pincode' | 'portOverride' | 'resetAccessory'> {
const alertReload = () => {
sdk.log.a(`The HomeKit plugin will reload momentarily for the changes to ${device.name} to take effect.`);
sdk.deviceManager.requestRestart();
}
return {
addIdentifyingMaterial: {
hide: true,
type: 'boolean',
},
qrCode: {
group,
// subgroup,

View File

@@ -267,9 +267,6 @@ export class HomeKitPlugin extends ScryptedDeviceBase implements MixinProvider,
},
undefined, 'Pairing'));
storageSettings.settings.pincode.persistedDefaultValue = randomPinCode();
// TODO: change this value after this current default has been persisted to existing clients.
// changing it now will cause existing accessories to be renamed.
storageSettings.settings.addIdentifyingMaterial.persistedDefaultValue = false;
const mixinConsole = deviceManager.getMixinConsole(device.id, this.nativeId);
@@ -280,7 +277,7 @@ export class HomeKitPlugin extends ScryptedDeviceBase implements MixinProvider,
published = true;
mixinConsole.log('Device is in accessory mode and is online. HomeKit services are being published.');
await this.publishAccessory(accessory, storageSettings.values.mac, storageSettings.values.pincode, standaloneCategory, storageSettings.values.portOverride, storageSettings.values.addIdentifyingMaterial);
await this.publishAccessory(accessory, storageSettings.values.mac, storageSettings.values.pincode, standaloneCategory, storageSettings.values.portOverride);
if (!hasPublished) {
hasPublished = true;
storageSettings.values.qrCode = accessory.setupURI();
@@ -423,7 +420,7 @@ export class HomeKitPlugin extends ScryptedDeviceBase implements MixinProvider,
return bind;
}
async publishAccessory(accessory: Accessory, username: string, pincode: string, category: Categories, port: number, addIdentifyingMaterial: boolean) {
async publishAccessory(accessory: Accessory, username: string, pincode: string, category: Categories, port: number) {
const bind = await this.getAdvertiserInterfaceBind();
await accessory.publish({
@@ -431,7 +428,7 @@ export class HomeKitPlugin extends ScryptedDeviceBase implements MixinProvider,
port,
pincode,
category,
addIdentifyingMaterial,
addIdentifyingMaterial: false,
advertiser: this.getAdvertiser(),
bind,
});

View File

@@ -103,22 +103,24 @@ addSupportedType({
const isRecordingEnabled = device.interfaces.includes(ScryptedInterface.MotionSensor);
let configuration: CameraRecordingConfiguration;
const openRecordingStreams = new Map<number, AsyncGenerator<RecordingPacket>>();
const openRecordingStreams = new Map<number, Deferred<any>>();
if (isRecordingEnabled) {
recordingDelegate = {
updateRecordingConfiguration(newConfiguration: CameraRecordingConfiguration) {
configuration = newConfiguration;
},
handleRecordingStreamRequest(streamId: number): AsyncGenerator<RecordingPacket> {
const ret = handleFragmentsRequests(streamId, device, configuration, console, homekitPlugin,
() => openRecordingStreams.has(streamId));
openRecordingStreams.set(streamId, ret);
const ret = handleFragmentsRequests(streamId, device, configuration, console, homekitPlugin);
const d = new Deferred<any>();
d.promise.then(reason => {
ret.throw(new Error(reason.toString()));
openRecordingStreams.delete(streamId);
});
openRecordingStreams.set(streamId, d);
return ret;
},
closeRecordingStream(streamId, reason) {
const r = openRecordingStreams.get(streamId);
r?.throw(new Error(reason?.toString()));
openRecordingStreams.delete(streamId);
openRecordingStreams.get(streamId)?.resolve(reason);
},
updateRecordingActive(active) {
},
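
The change above keys openRecordingStreams by the generator itself rather than a Deferred: closing a stream throws into the generator, which unwinds its finally blocks. A minimal sketch of that shutdown pattern (names illustrative):

const open = new Map<number, AsyncGenerator<Buffer>>();

async function* record(streamId: number): AsyncGenerator<Buffer> {
    try {
        while (open.has(streamId))
            yield Buffer.alloc(0); // produce recording packets
    }
    finally {
        // runs when the consumer calls .throw() or .return()
        console.log('recording stream cleaned up', streamId);
    }
}

function close(streamId: number, reason?: string) {
    // throw() returns a promise that rejects with this error; swallow it.
    open.get(streamId)?.throw(new Error(reason ?? 'closed')).catch(() => { });
    open.delete(streamId);
}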

View File

@@ -67,29 +67,12 @@ async function checkMp4StartsWithKeyFrame(console: Console, mp4: Buffer) {
await timeoutPromise(1000, new Promise(resolve => cp.on('exit', resolve)));
const h264 = Buffer.concat(buffers);
let offset = 0;
let countedZeroes = 0;
while (offset < h264.length - 6) {
const byte = h264[offset];
if (byte === 0) {
countedZeroes = Math.min(4, countedZeroes + 1);
if (h264.readInt32BE(offset) !== 1) {
offset++;
continue;
}
if (countedZeroes < 2) {
countedZeroes = 0;
offset++
continue;
}
countedZeroes = 0;
if (byte !== 1) {
offset++;
continue;
}
offset++;
offset += 4;
let naluType = h264.readUInt8(offset) & 0x1f;
if (naluType === NAL_TYPE_FU_A) {
offset++;
@@ -116,7 +99,7 @@ async function checkMp4StartsWithKeyFrame(console: Console, mp4: Buffer) {
}
export async function* handleFragmentsRequests(streamId: number, device: ScryptedDevice & VideoCamera & MotionSensor & AudioSensor,
configuration: CameraRecordingConfiguration, console: Console, homekitPlugin: HomeKitPlugin, isOpen: () => boolean): AsyncGenerator<RecordingPacket> {
configuration: CameraRecordingConfiguration, console: Console, homekitPlugin: HomeKitPlugin): AsyncGenerator<RecordingPacket> {
// homekitPlugin.storageSettings.values.lastKnownHomeHub = connection.remoteAddress;
@@ -194,7 +177,7 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
}
let audioArgs: string[];
if (!noAudio && (transcodeRecording || isDefinitelyNotAAC || debugMode.audio)) {
if (transcodeRecording || isDefinitelyNotAAC || debugMode.audio) {
if (!(transcodeRecording || debugMode.audio))
console.warn('Recording audio is not explicitly AAC, forcing transcoding. Setting audio output to AAC is recommended.', audioCodec);
@@ -319,7 +302,6 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
let needSkip = true;
let ftyp: Buffer[];
let moov: Buffer[];
for await (const box of generator) {
const { header, type, data } = box;
// console.log('motion fragment box', type);
@@ -332,7 +314,7 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
checkMp4 = false;
// pending will contain the moof
try {
if (false && !await checkMp4StartsWithKeyFrame(console, Buffer.concat([...ftyp, ...moov, ...pending, header, data]))) {
if (!await checkMp4StartsWithKeyFrame(console, Buffer.concat([...ftyp, ...moov, ...pending, header, data]))) {
needSkip = false;
pending = [];
continue;
@@ -361,19 +343,17 @@ export async function* handleFragmentsRequests(streamId: number, device: Scrypte
data: fragment,
isLast,
}
if (!isOpen())
return;
yield recordingPacket;
if (wasLast)
break;
}
}
console.log(`motion recording finished`);
}
catch (e) {
console.log(`motion recording completed ${e}`);
}
finally {
console.log(`motion recording finished`);
clearTimeout(videoTimeout);
cleanupPipes();
recordingFile?.end();
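
Note on the keyframe scan rewritten above: the old readInt32BE(offset) === 1 test only matched 4-byte 00 00 00 01 start codes; the zero counter also accepts the 3-byte 00 00 01 form before reading the NAL type from the low 5 bits of the next byte. A compact sketch of the same scan, assuming Annex B framing:

// Yields the NAL unit type after every Annex B start code.
function* naluTypes(h264: Buffer) {
    let zeroes = 0;
    for (let offset = 0; offset < h264.length - 1; offset++) {
        const byte = h264[offset];
        if (byte === 0) {
            zeroes = Math.min(4, zeroes + 1);
            continue;
        }
        if (byte === 1 && zeroes >= 2)
            yield h264[offset + 1] & 0x1f; // low 5 bits are the NAL type
        zeroes = 0;
    }
}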

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/mqtt",
"version": "0.0.80",
"version": "0.0.77",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/mqtt",
"version": "0.0.80",
"version": "0.0.77",
"dependencies": {
"aedes": "^0.46.1",
"axios": "^0.23.0",

View File

@@ -41,5 +41,5 @@
"@types/node": "^18.4.2",
"@types/nunjucks": "^3.2.0"
},
"version": "0.0.80"
"version": "0.0.77"
}

View File

@@ -6,7 +6,6 @@ import nunjucks from 'nunjucks';
import sdk from "@scrypted/sdk";
import type { MqttProvider } from './main';
import { getHsvFromXyColor, getXyYFromHsvColor } from './color-util';
import { MqttEvent } from './api/mqtt-client';
const { deviceManager } = sdk;
@@ -26,7 +25,7 @@ typeMap.set('light', {
const interfaces = [ScryptedInterface.OnOff, ScryptedInterface.Brightness];
if (config.color_mode) {
config.supported_color_modes.forEach(color_mode => {
if (color_mode === 'xy')
if (color_mode === 'xy')
interfaces.push(ScryptedInterface.ColorSettingHsv);
else if (color_mode === 'hs')
interfaces.push(ScryptedInterface.ColorSettingHsv);
@@ -247,7 +246,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
return;
}
this.debounceCallbacks = new Map<string, Set<(payload: Buffer) => void>>();
this.debounceCallbacks = new Map<string, Set<(payload: Buffer) => void>>();
const { client } = provider;
client.on('message', this.listener.bind(this));
@@ -298,7 +297,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
this.console.log('binding...');
const { client } = this.provider;
this.debounceCallbacks = new Map<string, Set<(payload: Buffer) => void>>();
this.debounceCallbacks = new Map<string, Set<(payload: Buffer) => void>>();
if (this.providedInterfaces.includes(ScryptedInterface.Online)) {
const config = this.loadComponentConfig(ScryptedInterface.Online);
@@ -469,7 +468,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.command_off_template,
command, "ON");
} else {
this.publishValue(config.command_topic,
this.publishValue(config.command_topic,
undefined, command, command);
}
}
@@ -490,7 +489,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.command_on_template,
command, "ON");
} else {
this.publishValue(config.command_topic,
this.publishValue(config.command_topic,
undefined, command, command);
}
}
@@ -507,8 +506,8 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.brightness_value_template,
scaledBrightness, scaledBrightness);
} else {
this.publishValue(config.command_topic,
`{ "state": "${scaledBrightness === 0 ? 'OFF' : 'ON'}", "brightness": ${scaledBrightness} }`,
this.publishValue(config.command_topic,
`{ "state": "${ scaledBrightness === 0 ? 'OFF' : 'ON'}", "brightness": ${scaledBrightness} }`,
scaledBrightness, 255);
}
}
@@ -526,7 +525,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
if (kelvin >= 0 && kelvin <= 100) {
const min = await this.getTemperatureMinK();
const max = await this.getTemperatureMaxK();
const diff = (max - min) * (kelvin / 100);
const diff = (max - min) * (kelvin/100);
kelvin = Math.round(min + diff);
}
@@ -543,7 +542,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.color_temp_command_template,
color, color);
} else {
this.publishValue(config.command_topic,
this.publishValue(config.command_topic,
undefined, color, color);
}
}
@@ -568,7 +567,7 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.hs_command_template,
color, color);
} else {
this.publishValue(config.command_topic,
this.publishValue(config.command_topic,
undefined, color, color);
}
} else if (this.colorMode === "xy") {
@@ -590,12 +589,12 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
config.xy_command_template,
color, color);
} else {
this.publishValue(config.command_topic,
this.publishValue(config.command_topic,
undefined, color, color);
}
}
}
}
async lock(): Promise<void> {
const config = this.loadComponentConfig(ScryptedInterface.Lock);
return this.publishValue(config.command_topic,
@@ -611,9 +610,6 @@ export class MqttAutoDiscoveryDevice extends ScryptedDeviceBase implements Onlin
interface AutoDiscoveryConfig {
component: string;
create: (mqttId: string, device: MixinDeviceBase<any>, topic: string) => any;
subscriptions?: {
[topic: string]: (device: MixinDeviceBase<any>, event: MqttEvent) => void;
}
}
const autoDiscoveryMap = new Map<string, AutoDiscoveryConfig>();
@@ -680,31 +676,7 @@ autoDiscoveryMap.set(ScryptedInterface.HumiditySensor, {
}
});
autoDiscoveryMap.set(ScryptedInterface.OnOff, {
component: 'switch',
create(mqttId, device, topic) {
return {
payload_on: 'true',
payload_off: 'false',
state_topic: `${topic}/${ScryptedInterfaceProperty.on}`,
command_topic: `${topic}/${ScryptedInterfaceProperty.on}/set`,
...getAutoDiscoveryDevice(device, mqttId),
}
},
subscriptions: {
'on/set': (device, event) => {
const d = sdk.systemManager.getDeviceById<OnOff>(device.id);
if (event.json)
d.turnOn();
else
d.turnOff();
}
},
});
export function publishAutoDiscovery(mqttId: string, client: Client, device: MixinDeviceBase<any>, topic: string, subscribe: boolean, autoDiscoveryPrefix = 'homeassistant') {
const subs = new Set<string>();
export function publishAutoDiscovery(mqttId: string, client: Client, device: MixinDeviceBase<any>, topic: string, autoDiscoveryPrefix = 'homeassistant') {
for (const iface of device.interfaces) {
const found = autoDiscoveryMap.get(iface);
if (!found)
@@ -719,38 +691,5 @@ export function publishAutoDiscovery(mqttId: string, client: Client, device: Mix
client.publish(configTopic, JSON.stringify(config), {
retain: true,
});
if (subscribe) {
const subscriptions = found.subscriptions || {};
for (const subscriptionTopic of Object.keys(subscriptions || {})) {
subs.add(subscriptionTopic);
const fullTopic = topic + '/' + subscriptionTopic;
const cb = subscriptions[subscriptionTopic];
client.subscribe(fullTopic)
client.on('message', (messageTopic, message) => {
if (fullTopic !== messageTopic && fullTopic !== '/' + messageTopic)
return;
device.console.log('mqtt message', subscriptionTopic, message.toString());
cb(device, {
get text() {
return message.toString();
},
get json() {
try {
return JSON.parse(message.toString());
}
catch (e) {
}
},
get buffer() {
return message;
}
})
});
}
}
}
return subs;
}
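
For context on the subscription support removed above: each autodiscovery config publishes a retained document whose command_topic Home Assistant writes to, and the per-interface subscriptions table dispatched those writes back to the device. A trimmed sketch of the OnOff case, assuming client and device handles are in scope:

const topic = 'scrypted/device'; // illustrative base topic
const commandTopic = `${topic}/on/set`;

client.subscribe(commandTopic);
client.on('message', (messageTopic, message) => {
    if (messageTopic !== commandTopic)
        return;
    // payloads are 'true'/'false' per the removed payload_on/payload_off config
    const on = JSON.parse(message.toString());
    on ? device.turnOn() : device.turnOff();
});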

View File

@@ -294,15 +294,13 @@ class MqttPublisherMixin extends SettingsMixinDeviceBase<any> {
allProperties.push(...properties);
}
let found: ReturnType<typeof publishAutoDiscovery>;
client.on('connect', packet => {
this.console.log('MQTT client connected, publishing current state.');
for (const method of allMethods) {
client.subscribe(this.pathname + '/' + method);
}
found = publishAutoDiscovery(this.provider.storageSettings.values.mqttId, client, this, this.pathname, true, 'homeassistant');
publishAutoDiscovery(this.provider.storageSettings.values.mqttId, client, this, this.pathname, 'homeassistant');
client.subscribe('homeassistant/status');
this.publishState(client);
});
@@ -313,17 +311,14 @@ class MqttPublisherMixin extends SettingsMixinDeviceBase<any> {
client.on('message', async (messageTopic, message) => {
if (messageTopic === 'homeassistant/status') {
publishAutoDiscovery(this.provider.storageSettings.values.mqttId, client, this, this.pathname, false, 'homeassistant');
publishAutoDiscovery(this.provider.storageSettings.values.mqttId, client, this, this.pathname, 'homeassistant');
this.publishState(client);
return;
}
const method = messageTopic.substring(this.pathname.length + 1);
if (!allMethods.includes(method)) {
if (!allProperties.includes(method)) {
if (!found?.has(method)) {
this.console.warn('unknown topic', method);
}
}
if (!allProperties.includes(method))
this.console.warn('unknown topic', method);
return;
}
try {
@@ -597,7 +592,7 @@ export class MqttProvider extends ScryptedDeviceBase implements DeviceProvider,
return isPublishable(type, interfaces) ? [ScryptedInterface.Settings] : undefined;
}
async getMixin(mixinDevice: any, mixinDeviceInterfaces: ScryptedInterface[], mixinDeviceState: WritableDeviceState): Promise<any> {
async getMixin(mixinDevice: any, mixinDeviceInterfaces: ScryptedInterface[], mixinDeviceState:WritableDeviceState): Promise<any> {
return new MqttPublisherMixin(this, {
mixinDevice,
mixinDeviceState,

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/objectdetector",
"version": "0.1.39",
"version": "0.1.33",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/objectdetector",
"version": "0.1.39",
"version": "0.1.33",
"license": "Apache-2.0",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/objectdetector",
"version": "0.1.39",
"version": "0.1.33",
"description": "Scrypted Video Analysis Plugin. Installed alongside a detection service like OpenCV or TensorFlow.",
"author": "Scrypted",
"license": "Apache-2.0",

View File

@@ -1,75 +0,0 @@
// visual similarity
const similarCharacters = [
['0', 'O', 'D'],
['1', 'I'],
['2', 'Z'],
['4', 'A'],
['5', 'S'],
['8', 'B'],
['6', 'G'],
// not sure about this one.
['A', '4'],
['C', 'G'],
['E', 'F'],
];
const similarCharactersMap = new Map<string, Set<string>>();
for (const similarCharacter of similarCharacters) {
for (const character of similarCharacter) {
if (!similarCharactersMap.has(character)) {
similarCharactersMap.set(character, new Set());
}
for (const similar of similarCharacter) {
similarCharactersMap.get(character)!.add(similar);
}
}
}
function isSameCharacter(c1: string, c2: string) {
if (c1 === c2)
return true;
return similarCharactersMap.get(c1)?.has(c2);
}
export function levenshteinDistance(str1: string, str2: string): number {
// todo: handle lower/uppercase similarity in similarCharacters above.
// i.e., b is visually similar to 6, but does not really look like B.
// others include e and C. v, u and Y. l, i, 1.
str1 = str1.toUpperCase();
str2 = str2.toUpperCase();
const len1 = str1.length;
const len2 = str2.length;
// If either string is empty, the distance is the length of the other string
if (len1 === 0) return len2;
if (len2 === 0) return len1;
let prev: number[] = new Array(len2 + 1);
let curr: number[] = new Array(len2 + 1);
// Initialize the first row of the matrix to be the index of the second string
for (let i = 0; i <= len2; i++) {
prev[i] = i;
}
for (let i = 1; i <= len1; i++) {
// Initialize the current row with the distance from the previous row's first element
curr[0] = i;
for (let j = 1; j <= len2; j++) {
let cost = isSameCharacter(str1.charAt(i - 1), str2.charAt(j - 1)) ? 0 : 1;
// Compute the minimum of three possible operations: insertion, deletion, or substitution
curr[j] = Math.min(prev[j] + 1, curr[j - 1] + 1, prev[j - 1] + cost);
}
// Swap the previous and current rows for the next iteration
const temp = prev;
prev = curr;
curr = temp;
}
return prev[len2];
}
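
Worked values for the removed helper, using the similar-character table above (substitutions within a group like 8≈B or 1≈I cost nothing, and both inputs are uppercased first):

levenshteinDistance('ABC123', 'A8CI23'); // 0 ('8'≈'B', 'I'≈'1')
levenshteinDistance('ABC123', 'ABC12');  // 1 (one deletion)
levenshteinDistance('abc123', 'QBC123'); // 1 (one substitution)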

View File

@@ -162,7 +162,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
getCurrentSettings() {
const settings = this.model.settings;
if (!settings)
return { id: this.id };
return;
const ret: { [key: string]: any } = {};
for (const setting of settings) {
@@ -183,10 +183,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
if (this.hasMotionType)
ret['motionAsObjects'] = true;
return {
...ret,
id: this.id,
};
return ret;
}
maybeStartDetection() {
@@ -913,10 +910,9 @@ class ObjectDetectorMixin extends MixinDeviceBase<ObjectDetection> implements Mi
let objectDetection = systemManager.getDeviceById<ObjectDetection>(this.id);
const hasMotionType = this.model.classes.includes('motion');
const group = hasMotionType ? 'Motion Detection' : 'Object Detection';
const model = await objectDetection.getDetectionModel({ id: mixinDeviceState.id });
// const group = objectDetection.name.replace('Plugin', '').trim();
const ret = new ObjectDetectionMixin(this.plugin, mixinDevice, mixinDeviceInterfaces, mixinDeviceState, this.mixinProviderNativeId, objectDetection, model, group, hasMotionType);
const ret = new ObjectDetectionMixin(this.plugin, mixinDevice, mixinDeviceInterfaces, mixinDeviceState, this.mixinProviderNativeId, objectDetection, this.model, group, hasMotionType);
this.currentMixins.add(ret);
return ret;
}

View File

@@ -1,7 +1,6 @@
import sdk, { Camera, EventListenerRegister, MediaObject, MotionSensor, ObjectDetector, ObjectsDetected, Readme, RequestPictureOptions, ResponsePictureOptions, ScryptedDevice, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, ScryptedNativeId, Setting, SettingValue, Settings } from "@scrypted/sdk";
import { StorageSetting, StorageSettings } from "@scrypted/sdk/storage-settings";
import type { ObjectDetectionPlugin } from "./main";
import { levenshteinDistance } from "./edit-distance";
export const SMART_MOTIONSENSOR_PREFIX = 'smart-motionsensor-';
@@ -45,7 +44,7 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
defaultValue: 0.7,
},
requireDetectionThumbnail: {
title: 'Require Detections with Images',
title: 'Rquire Detections with Images',
description: 'When enabled, this sensor will ignore detections results that do not have images.',
type: 'boolean',
defaultValue: false,
@@ -56,21 +55,6 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
type: 'boolean',
defaultValue: false,
},
labels: {
group: 'Recognition',
title: 'Labels',
description: 'The labels (license numbers, names) that will trigger this smart motion sensor.',
multiple: true,
combobox: true,
choices: [],
},
labelDistance: {
group: 'Recognition',
title: 'Label Distance',
description: 'The maximum edit distance between the detected label and the desired label. Ie, a distance of 1 will match "abcde" to "abcbe" or "abcd".',
type: 'number',
defaultValue: 2,
},
});
listener: EventListenerRegister;
@@ -173,8 +157,6 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
if (this.storageSettings.values.requireDetectionThumbnail && !detected.detectionId)
return false;
const { labels, labelDistance } = this.storageSettings.values;
const match = detected.detections?.find(d => {
if (this.storageSettings.values.requireScryptedNvrDetections && !d.boundingBox)
return false;
@@ -199,27 +181,10 @@ export class SmartMotionSensor extends ScryptedDeviceBase implements Settings, R
this.console.warn('Camera does not provide Zones in detection event. Zone filter will not be applied.');
}
}
// when not searching for a label, validate the object is moving.
if (!labels?.length)
return !d.movement || d.movement.moving;
if (!d.label)
return false;
for (const label of labels) {
if (label === d.label)
return true;
if (!labelDistance)
continue;
if (levenshteinDistance(label, d.label) <= labelDistance)
return true;
this.console.log('Label does not match.', label, d.label);
}
return false;
});
if (!d.movement)
return true;
return d.movement.moving;
})
if (match) {
if (!this.motionDetected)
console.log('Smart Motion Sensor triggered on', match);

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/onvif",
"version": "0.1.14",
"version": "0.1.10",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/onvif",
"version": "0.1.14",
"version": "0.1.10",
"license": "Apache",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/onvif",
"version": "0.1.14",
"version": "0.1.10",
"description": "ONVIF Camera Plugin for Scrypted",
"author": "Scrypted",
"license": "Apache",

View File

@@ -97,7 +97,7 @@ export class OnvifIntercom implements Intercom {
this.camera.console.log('backchannel transport', transportDict);
const availableMatches = audioBackchannel.rtpmaps.filter(rtpmap => rtpmap.ffmpegEncoder);
const defaultMatch = audioBackchannel.rtpmaps.find(rtpmap => rtpmap.ffmpegEncoder === 'pcm_mulaw') || audioBackchannel.rtpmaps.find(rtpmap => rtpmap.ffmpegEncoder);
const defaultMatch = audioBackchannel.rtpmaps.find(rtpmap => rtpmap.ffmpegEncoder);
if (!defaultMatch)
throw new Error('no supported codec was found for back channel');
@@ -151,7 +151,7 @@ export class OnvifIntercom implements Intercom {
}
const elapsedRtpTimeMs = Math.abs(pending.header.timestamp - p.header.timestamp) / 8000 * 1000;
if (elapsedRtpTimeMs <= 160 && pending.payload.length + p.payload.length <= 1024) {
if (elapsedRtpTimeMs <= 60) {
pending.payload = Buffer.concat([pending.payload, p.payload]);
return;
}
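
The packing threshold above follows from the 8 kHz RTP clock used by the G.711 backchannel: timestamp deltas divide by 8000 and scale by 1000 to get milliseconds. Worked values (packet timestamps illustrative):

// 8 ticks per millisecond at an 8 kHz clock
const elapsedRtpTimeMs = Math.abs(48000 - 46720) / 8000 * 1000; // 160 ms
// old cutoff: 60 ms; new cutoff: 160 ms plus a 1024 byte payload cap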

View File

@@ -11,7 +11,7 @@
// local checkout
"scrypted.debugHost": "127.0.0.1",
"scrypted.serverRoot": "/Users/koush/.scrypted",
// "scrypted.debugHost": "koushik-winvm",
// "scrypted.debugHost": "koushik-windows",
// "scrypted.serverRoot": "C:\\Users\\koush\\.scrypted",
"scrypted.pythonRemoteRoot": "${config:scrypted.serverRoot}/volume/plugin.zip",

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/openvino",
"version": "0.1.77",
"version": "0.1.54",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/openvino",
"version": "0.1.77",
"version": "0.1.54",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -33,7 +33,6 @@
"runtime": "python",
"type": "API",
"interfaces": [
"DeviceProvider",
"Settings",
"ObjectDetection",
"ObjectDetectionPreview"
@@ -42,5 +41,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.77"
"version": "0.1.54"
}

View File

@@ -1,36 +0,0 @@
import concurrent.futures
from PIL import Image
import asyncio
from typing import Tuple
# vips is already multithreaded, but needs to be kicked off the python asyncio thread.
toThreadExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="image")
async def to_thread(f):
loop = asyncio.get_running_loop()
return await loop.run_in_executor(toThreadExecutor, f)
async def ensureRGBData(data: bytes, size: Tuple[int, int], format: str):
if format == 'rgb':
return Image.frombuffer('RGB', size, data)
def convert():
rgba = Image.frombuffer('RGBA', size, data)
try:
return rgba.convert('RGB')
finally:
rgba.close()
return await to_thread(convert)
async def ensureRGBAData(data: bytes, size: Tuple[int, int], format: str):
if format == 'rgba':
return Image.frombuffer('RGBA', size, data)
# this path should never be possible as all the image sources should be capable of rgba.
def convert():
rgb = Image.frombuffer('RGB', size, data)
try:
return rgb.convert('RGBA')
finally:
rgb.close()
return await to_thread(convert)

View File

@@ -1,44 +0,0 @@
import numpy as np
def softmax(X, theta = 1.0, axis = None):
"""
Compute the softmax of each element along an axis of X.
Parameters
----------
X: ND-Array. Probably should be floats.
theta (optional): float parameter, used as a multiplier
prior to exponentiation. Default = 1.0
axis (optional): axis to compute values along. Default is the
first non-singleton axis.
Returns an array the same size as X. The result will sum to 1
along the specified axis.
"""
# make X at least 2d
y = np.atleast_2d(X)
# find axis
if axis is None:
axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
# multiply y against the theta parameter,
y = y * float(theta)
# subtract the max for numerical stability
y = y - np.expand_dims(np.max(y, axis = axis), axis)
# exponentiate y
y = np.exp(y)
# take the sum along the specified axis
ax_sum = np.expand_dims(np.sum(y, axis = axis), axis)
# finally: divide elementwise
p = y / ax_sum
# flatten if X was 1D
if len(X.shape) == 1: p = p.flatten()
return p
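
In formula form, the removed helper computes a temperature-scaled, numerically stable softmax along the chosen axis; subtracting the row maximum cancels out and only guards against overflow:

\mathrm{softmax}(X)_i = \frac{e^{\theta x_i - \max_j \theta x_j}}{\sum_k e^{\theta x_k - \max_j \theta x_j}}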

View File

@@ -1,98 +0,0 @@
from PIL import Image, ImageOps
from scrypted_sdk import (
ObjectDetectionResult,
)
import scrypted_sdk
import numpy as np
from common.softmax import softmax
from common.colors import ensureRGBData
import math
async def crop_text(d: ObjectDetectionResult, image: scrypted_sdk.Image, width: int, height: int):
l, t, w, h = d["boundingBox"]
l = math.floor(l)
t = math.floor(t)
w = math.floor(w)
h = math.floor(h)
format = image.format or 'rgb'
cropped = await image.toBuffer(
{
"crop": {
"left": l,
"top": t,
"width": w,
"height": h,
},
"format": format,
}
)
pilImage = await ensureRGBData(cropped, (w, h), format)
resized = pilImage.resize((width, height), resample=Image.LANCZOS).convert("L")
pilImage.close()
return resized
async def prepare_text_result(d: ObjectDetectionResult, image: scrypted_sdk.Image):
new_height = 64
new_width = int(d["boundingBox"][2] * new_height / d["boundingBox"][3])
textImage = await crop_text(d, image, new_width, new_height)
new_width = 256
# calculate padding dimensions
padding = (0, 0, new_width - textImage.width, 0)
# todo: clamp entire edge rather than just center
edge_color = textImage.getpixel((textImage.width - 1, textImage.height // 2))
# pad image
textImage = ImageOps.expand(textImage, padding, fill=edge_color)
# pil to numpy
image_array = np.array(textImage)
image_array = image_array.reshape(textImage.height, textImage.width, 1)
image_tensor = image_array.transpose((2, 0, 1)) / 255
# test normalize contrast
# image_tensor = (image_tensor - np.min(image_tensor)) / (np.max(image_tensor) - np.min(image_tensor))
image_tensor = (image_tensor - 0.5) / 0.5
image_tensor = np.expand_dims(image_tensor, axis=0)
return image_tensor
characters = "0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ €ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
dict_character = list(characters)
character = ["[blank]"] + dict_character # dummy '[blank]' token for CTCLoss (index 0)
def decode_greedy(text_index, length):
"""convert text-index into text-label."""
texts = []
index = 0
for l in length:
t = text_index[index : index + l]
# Returns a boolean array where true is when the value is not repeated
a = np.insert(~((t[1:] == t[:-1])), 0, True)
# Returns a boolean array where true is when the value is not in the ignore_idx list
b = ~np.isin(t, np.array(""))
# Combine the two boolean array
c = a & b
# Gets the corresponding character according to the saved indexes
text = "".join(np.array(character)[t[c.nonzero()]])
texts.append(text)
index += l
return texts
def process_text_result(preds):
preds_size = preds.shape[1]
# softmax preds using scipy
preds_prob = softmax(preds, axis=2)
# preds_prob = softmax(preds)
pred_norm = np.sum(preds_prob, axis=2)
preds_prob = preds_prob / np.expand_dims(pred_norm, axis=-1)
preds_index = np.argmax(preds_prob, axis=2)
preds_index = preds_index.reshape(-1)
preds_str = decode_greedy(preds_index, np.array([preds_size]))
# why index 0? are there multiple predictions?
return preds_str[0].replace('[blank]', '')
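
decode_greedy above is standard greedy CTC decoding: argmax per time step, collapse consecutive repeats, and drop the [blank] token (index 0, removed here by the final replace). A small sketch of the collapse step in TypeScript, with an illustrative index sequence:

// Greedy CTC collapse: skip repeats, skip blanks (index 0).
function ctcCollapse(indices: number[], alphabet: string[]) {
    let out = '';
    let prev = -1;
    for (const i of indices) {
        if (i !== prev && i !== 0)
            out += alphabet[i];
        prev = i;
    }
    return out;
}

// With alphabet ['[blank]', '0', '1', '2', ...]:
// ctcCollapse([2, 2, 0, 2, 3, 0], alphabet) === '112'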

View File

@@ -1,85 +1,52 @@
from __future__ import annotations
import asyncio
import json
import re
from typing import Any, Tuple
import numpy as np
import openvino.runtime as ov
import scrypted_sdk
from PIL import Image
from scrypted_sdk.other import SettingValue
from scrypted_sdk.types import Setting
import concurrent.futures
import common.yolo as yolo
from predict import Prediction, PredictPlugin
from predict.rectangle import Rectangle
from .recognition import OpenVINORecognition
predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "OpenVINO-Predict")
availableModels = [
"Default",
"scrypted_yolov6n_320",
"scrypted_yolov6n",
"scrypted_yolov6s_320",
"scrypted_yolov6s",
"scrypted_yolov9c_320",
"scrypted_yolov9c",
"scrypted_yolov8n_320",
"scrypted_yolov8n",
"ssd_mobilenet_v1_coco",
"ssdlite_mobilenet_v2",
"yolo-v3-tiny-tf",
"yolo-v4-tiny-tf",
]
from predict import PredictPlugin, Prediction, Rectangle
import numpy as np
import yolo
def parse_label_contents(contents: str):
lines = contents.splitlines()
lines = [line for line in lines if line.strip()]
ret = {}
for row_number, content in enumerate(lines):
pair = re.split(r"[:\s]+", content.strip(), maxsplit=1)
pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
if len(pair) == 2 and pair[0].strip().isdigit():
ret[int(pair[0])] = pair[1].strip()
else:
ret[row_number] = content.strip()
return ret
def param_to_string(parameters) -> str:
"""Convert a list / tuple of parameters returned from IE to a string."""
if isinstance(parameters, (list, tuple)):
return ", ".join([str(x) for x in parameters])
return ', '.join([str(x) for x in parameters])
else:
return str(parameters)
def dump_device_properties(core):
print("Available devices:")
print('Available devices:')
for device in core.available_devices:
print(f"{device} :")
print("\tSUPPORTED_PROPERTIES:")
for property_key in core.get_property(device, "SUPPORTED_PROPERTIES"):
if property_key not in (
"SUPPORTED_METRICS",
"SUPPORTED_CONFIG_KEYS",
"SUPPORTED_PROPERTIES",
):
print(f'{device} :')
print('\tSUPPORTED_PROPERTIES:')
for property_key in core.get_property(device, 'SUPPORTED_PROPERTIES'):
if property_key not in ('SUPPORTED_METRICS', 'SUPPORTED_CONFIG_KEYS', 'SUPPORTED_PROPERTIES'):
try:
property_val = core.get_property(device, property_key)
except TypeError:
property_val = "UNSUPPORTED TYPE"
print(f"\t\t{property_key}: {param_to_string(property_val)}")
print("")
property_val = 'UNSUPPORTED TYPE'
print(f'\t\t{property_key}: {param_to_string(property_val)}')
print('')
class OpenVINOPlugin(
PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings, scrypted_sdk.DeviceProvider
):
class OpenVINOPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
@@ -87,99 +54,70 @@ class OpenVINOPlugin(
dump_device_properties(self.core)
available_devices = self.core.available_devices
self.available_devices = available_devices
print("available devices: %s" % available_devices)
print('available devices: %s' % available_devices)
mode = self.storage.getItem("mode")
if mode == "Default":
mode = "AUTO"
mode = mode or "AUTO"
self.mode = mode
mode = self.storage.getItem('mode')
if mode == 'Default':
mode = 'AUTO'
mode = mode or 'AUTO'
precision = self.storage.getItem("precision") or "Default"
if precision == "Default":
precision = self.storage.getItem('precision') or 'Default'
if precision == 'Default':
using_mode = mode
if using_mode == "AUTO":
if "GPU" in available_devices:
using_mode = "GPU"
if using_mode == "GPU":
precision = "FP16"
if using_mode == 'AUTO':
if 'GPU' in available_devices:
using_mode = 'GPU'
if using_mode == 'GPU':
precision = 'FP16'
else:
precision = "FP32"
precision = 'FP32'
self.precision = precision
model = self.storage.getItem('model') or 'Default'
if model == 'Default':
model = 'yolov8n_320'
self.yolo = 'yolo' in model
self.yolov8 = "yolov8" in model
self.yolov9 = "yolov9" in model
self.sigmoid = model == 'yolo-v4-tiny-tf'
model = self.storage.getItem("model") or "Default"
if model == "Default" or model not in availableModels:
if model != "Default":
self.storage.setItem("model", "Default")
model = "scrypted_yolov8n_320"
self.yolo = "yolo" in model
self.scrypted_yolo = "scrypted_yolo" in model
self.scrypted_model = "scrypted" in model
self.sigmoid = model == "yolo-v4-tiny-tf"
print(f'model/mode/precision: {model}/{mode}/{precision}')
print(f"model/mode/precision: {model}/{mode}/{precision}")
ovmodel = "best" if self.scrypted_model else model
model_version = "v5"
xmlFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{ovmodel}.xml",
f"{model_version}/{model}/{precision}/{ovmodel}.xml",
)
binFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{ovmodel}.bin",
f"{model_version}/{model}/{precision}/{ovmodel}.bin",
)
if self.scrypted_model:
labelsFile = self.downloadFile(
"https://raw.githubusercontent.com/koush/openvino-models/main/scrypted_labels.txt",
"scrypted_labels.txt",
)
elif self.yolo:
labelsFile = self.downloadFile(
"https://raw.githubusercontent.com/koush/openvino-models/main/coco_80cl.txt",
"coco_80cl.txt",
)
model_version = 'v4'
xmlFile = self.downloadFile(f'https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{model}.xml', f'{model_version}/{precision}/{model}.xml')
binFile = self.downloadFile(f'https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{model}.bin', f'{model_version}/{precision}/{model}.bin')
if self.yolo:
labelsFile = self.downloadFile('https://raw.githubusercontent.com/koush/openvino-models/main/coco_80cl.txt', 'coco_80cl.txt')
else:
labelsFile = self.downloadFile(
"https://raw.githubusercontent.com/koush/openvino-models/main/coco_labels.txt",
"coco_labels.txt",
)
labelsFile = self.downloadFile('https://raw.githubusercontent.com/koush/openvino-models/main/coco_labels.txt', 'coco_labels.txt')
print(xmlFile, binFile, labelsFile)
try:
self.compiled_model = self.core.compile_model(xmlFile, mode)
print(
"EXECUTION_DEVICES",
self.compiled_model.get_property("EXECUTION_DEVICES"),
)
print("EXECUTION_DEVICES", self.compiled_model.get_property("EXECUTION_DEVICES"))
except:
import traceback
traceback.print_exc()
print("Reverting all settings.")
self.storage.removeItem("mode")
self.storage.removeItem("model")
self.storage.removeItem("precision")
self.storage.removeItem('mode')
self.storage.removeItem('model')
self.storage.removeItem('precision')
self.requestRestart()
# mobilenet 1,300,300,3
# yolov3/4 1,416,416,3
# yolov9 1,3,320,320
# yolov8 1,3,320,320
# second dim is always good.
self.model_dim = self.compiled_model.inputs[0].shape[2]
labels_contents = open(labelsFile, "r").read()
labels_contents = open(labelsFile, 'r').read()
self.labels = parse_label_contents(labels_contents)
asyncio.ensure_future(self.prepareRecognitionModels(), loop=self.loop)
async def getSettings(self) -> list[Setting]:
mode = self.storage.getItem("mode") or "Default"
model = self.storage.getItem("model") or "Default"
precision = self.storage.getItem("precision") or "Default"
mode = self.storage.getItem('mode') or 'Default'
model = self.storage.getItem('model') or 'Default'
precision = self.storage.getItem('precision') or 'Default'
return [
{
"title": "Available Devices",
@@ -189,38 +127,47 @@ class OpenVINOPlugin(
"key": "available_devices",
},
{
"key": "model",
"title": "Model",
"description": "The detection model used to find objects.",
"choices": availableModels,
"value": model,
'key': 'model',
'title': 'Model',
'description': 'The detection model used to find objects.',
'choices': [
'Default',
'ssd_mobilenet_v1_coco',
'ssdlite_mobilenet_v2',
'yolo-v3-tiny-tf',
'yolo-v4-tiny-tf',
'yolov8n',
'yolov8n_320',
'yolov9c_320',
],
'value': model,
},
{
"key": "mode",
"title": "Mode",
"description": "AUTO, CPU, or GPU mode to use for detections. Requires plugin reload. Use CPU if the system has unreliable GPU drivers.",
"choices": [
"Default",
"AUTO",
"CPU",
"GPU",
'key': 'mode',
'title': 'Mode',
'description': 'AUTO, CPU, or GPU mode to use for detections. Requires plugin reload. Use CPU if the system has unreliable GPU drivers.',
'choices': [
'Default',
'AUTO',
'CPU',
'GPU',
],
"value": mode,
"combobox": True,
'value': mode,
'combobox': True,
},
{
"key": "precision",
"title": "Precision",
"description": "The model floating point precision. FP16 is recommended for GPU. FP32 is recommended for CPU.",
"choices": [
"Default",
"FP16",
"FP32",
'key': 'precision',
'title': 'Precision',
'description': 'The model floating point precision. FP16 is recommended for GPU. FP32 is recommended for CPU.',
'choices': [
'Default',
'FP16',
'FP32',
],
"value": precision,
},
'value': precision,
}
]
async def putSetting(self, key: str, value: SettingValue):
self.storage.setItem(key, value)
await self.onDeviceEvent(scrypted_sdk.ScryptedInterface.Settings.value, None)
@@ -234,15 +181,30 @@ class OpenVINOPlugin(
return [self.model_dim, self.model_dim]
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
def predict(input_tensor):
async def predict():
infer_request = self.compiled_model.create_infer_request()
# the input_tensor can be created with the shared_memory=True parameter,
# but that seems to cause issues on some platforms.
if self.yolov8 or self.yolov9:
im = np.stack([input])
im = im.transpose((0, 3, 1, 2)) # BHWC to BCHW, (n, 3, h, w)
im = im.astype(np.float32) / 255.0
im = np.ascontiguousarray(im) # contiguous
im = ov.Tensor(array=im)
input_tensor = im
elif self.yolo:
input_tensor = ov.Tensor(array=np.expand_dims(np.array(input), axis=0).astype(np.float32))
else:
input_tensor = ov.Tensor(array=np.expand_dims(np.array(input), axis=0))
# Set input tensor for model with one input
infer_request.set_input_tensor(input_tensor)
output_tensors = infer_request.infer()
infer_request.start_async()
infer_request.wait()
objs = []
if self.scrypted_yolo:
objs = yolo.parse_yolov9(output_tensors[0][0])
if self.yolov8 or self.yolov9:
objs = yolo.parse_yolov8(infer_request.output_tensors[0].data[0])
return objs
if self.yolo:
@@ -252,21 +214,17 @@ class OpenVINOPlugin(
out_blob = infer_request.outputs[0]
else:
out_blob = infer_request.outputs[1]
# 13 13
objects = yolo.parse_yolo_region(
out_blob.data,
(input.width, input.height),
(81, 82, 135, 169, 344, 319),
self.sigmoid,
)
objects = yolo.parse_yolo_region(out_blob.data, (input.width, input.height),(81,82, 135,169, 344,319), self.sigmoid)
for r in objects:
obj = Prediction(
r["classId"],
r["confidence"],
Rectangle(r["xmin"], r["ymin"], r["xmax"], r["ymax"]),
)
obj = Prediction(r['classId'], r['confidence'], Rectangle(
r['xmin'],
r['ymin'],
r['xmax'],
r['ymax']
))
objs.append(obj)
# what about output[1]?
@@ -275,6 +233,7 @@ class OpenVINOPlugin(
return objs
output = infer_request.get_output_tensor(0)
for values in output.data[0][0].astype(float):
valid, index, confidence, l, t, r, b = values
@@ -289,59 +248,22 @@ class OpenVINOPlugin(
r = torelative(r)
b = torelative(b)
obj = Prediction(index - 1, confidence, Rectangle(l, t, r, b))
obj = Prediction(index - 1, confidence, Rectangle(
l,
t,
r,
b
))
objs.append(obj)
return objs
# the input_tensor can be created with the shared_memory=True parameter,
# but that seems to cause issues on some platforms.
if self.scrypted_yolo:
im = np.stack([input])
im = im.transpose((0, 3, 1, 2)) # BHWC to BCHW, (n, 3, h, w)
im = im.astype(np.float32) / 255.0
im = np.ascontiguousarray(im) # contiguous
im = ov.Tensor(array=im)
input_tensor = im
elif self.yolo:
input_tensor = ov.Tensor(
array=np.expand_dims(np.array(input), axis=0).astype(np.float32)
)
else:
input_tensor = ov.Tensor(array=np.expand_dims(np.array(input), axis=0))
try:
objs = await asyncio.get_event_loop().run_in_executor(
predictExecutor, lambda: predict(input_tensor)
)
objs = await predict()
except:
import traceback
traceback.print_exc()
raise
ret = self.create_detection_result(objs, src_size, cvss)
return ret
async def prepareRecognitionModels(self):
try:
await scrypted_sdk.deviceManager.onDevicesChanged(
{
"devices": [
{
"nativeId": "recognition",
"type": scrypted_sdk.ScryptedDeviceType.Builtin.value,
"interfaces": [
scrypted_sdk.ScryptedInterface.ObjectDetection.value,
],
"name": "OpenVINO Recognition",
}
]
}
)
except:
pass
async def getDevice(self, nativeId: str) -> Any:
return OpenVINORecognition(self, nativeId)

View File

@@ -1,71 +0,0 @@
from __future__ import annotations
import concurrent.futures
import openvino.runtime as ov
import numpy as np
from predict.recognize import RecognizeDetection
def euclidean_distance(arr1, arr2):
return np.linalg.norm(arr1 - arr2)
def cosine_similarity(vector_a, vector_b):
dot_product = np.dot(vector_a, vector_b)
norm_a = np.linalg.norm(vector_a)
norm_b = np.linalg.norm(vector_b)
similarity = dot_product / (norm_a * norm_b)
return similarity
class OpenVINORecognition(RecognizeDetection):
def __init__(self, plugin, nativeId: str | None = None):
self.plugin = plugin
super().__init__(nativeId=nativeId)
def downloadModel(self, model: str):
ovmodel = "best"
precision = self.plugin.precision
model_version = "v5"
xmlFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{ovmodel}.xml",
f"{model_version}/{model}/{precision}/{ovmodel}.xml",
)
binFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/openvino-models/main/{model}/{precision}/{ovmodel}.bin",
f"{model_version}/{model}/{precision}/{ovmodel}.bin",
)
print(xmlFile, binFile)
return self.plugin.core.compile_model(xmlFile, self.plugin.mode)
def predictDetectModel(self, input):
infer_request = self.detectModel.create_infer_request()
im = np.stack([input])
im = im.transpose((0, 3, 1, 2)) # BHWC to BCHW, (n, 3, h, w)
im = im.astype(np.float32) / 255.0
im = np.ascontiguousarray(im) # contiguous
im = ov.Tensor(array=im)
input_tensor = im
infer_request.set_input_tensor(input_tensor)
infer_request.start_async()
infer_request.wait()
return infer_request.output_tensors[0].data[0]
def predictFaceModel(self, input):
im = ov.Tensor(array=input)
infer_request = self.faceModel.create_infer_request()
infer_request.set_input_tensor(im)
infer_request.start_async()
infer_request.wait()
return infer_request.output_tensors[0].data[0]
def predictTextModel(self, input):
input = input.astype(np.float32)
im = ov.Tensor(array=input)
infer_request = self.textModel.create_infer_request()
infer_request.set_input_tensor(im)
infer_request.start_async()
infer_request.wait()
return infer_request.output_tensors[0].data
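
For reference, the cosine_similarity helper defined at the top of this file measures the angle between two embedding vectors. A quick usage sketch (the 512-dimension size matches FaceNet-style inception_resnet_v1 outputs, but is an assumption here):

import numpy as np

def cosine_similarity(a, b):
    # 1.0 means identical direction; face pipelines typically treat
    # high values as the same identity
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

e1 = np.random.rand(512).astype(np.float32)
e2 = e1 + np.random.normal(0, 0.01, 512).astype(np.float32)
print(cosine_similarity(e1, e2))  # near 1.0 for near-identical embeddings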

View File

@@ -1,12 +1,12 @@
import sys
from math import exp
import numpy as np
from predict import Prediction
from predict.rectangle import Rectangle
from predict import Prediction, Rectangle
defaultThreshold = .2
def parse_yolov9(results, threshold = defaultThreshold, scale = None, confidence_scale = None):
def parse_yolov8(results, threshold = defaultThreshold, scale = None, confidence_scale = None):
objs = []
keep = np.argwhere(results[4:] > threshold)
for indices in keep:
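
The hunk above renames parse_yolov8 to parse_yolov9 but keeps the same vectorized decode: np.argwhere over the class-score rows. A self-contained sketch of that decode, assuming the (4 + num_classes, num_anchors) output layout of YOLOv8/v9 exports (the quantization scaling handled by the real function's scale callbacks is omitted):

import numpy as np

def parse_boxes(results, threshold=0.2):
    # results: (4 + num_classes, num_anchors); rows 0-3 are cx, cy, w, h
    objs = []
    keep = np.argwhere(results[4:] > threshold)  # (class_row, anchor) pairs
    for class_id, anchor in keep:
        cx, cy, w, h = results[0:4, anchor]
        confidence = results[4 + class_id, anchor]
        # convert center/size to corner coordinates
        objs.append((int(class_id), float(confidence),
                     (cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2)))
    return objs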

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/python-codecs",
"version": "0.1.96",
"version": "0.1.95",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/python-codecs",
"version": "0.1.96",
"version": "0.1.95",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/python-codecs",
"version": "0.1.96",
"version": "0.1.95",
"description": "Python Codecs for Scrypted",
"keywords": [
"scrypted",

View File

@@ -1,7 +1,6 @@
import asyncio
import time
import traceback
import os
from typing import Any, AsyncGenerator, List, Union
import scrypted_sdk
@@ -203,7 +202,7 @@ def multiprocess_exit():
class CodecFork:
def timeoutExit(self):
def timeoutExit():
print("Frame yield timed out, exiting pipeline.")
multiprocess_exit()

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/reolink",
"version": "0.0.66",
"version": "0.0.63",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/reolink",
"version": "0.0.66",
"version": "0.0.63",
"license": "Apache",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/reolink",
"version": "0.0.66",
"version": "0.0.63",
"description": "Reolink Plugin for Scrypted",
"author": "Scrypted",
"license": "Apache",

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/snapshot",
"version": "0.2.50",
"version": "0.2.40",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/snapshot",
"version": "0.2.50",
"version": "0.2.40",
"dependencies": {
"@types/node": "^20.10.6",
"sharp": "^0.33.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/snapshot",
"version": "0.2.50",
"version": "0.2.40",
"description": "Snapshot Plugin for Scrypted",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",

View File

@@ -19,6 +19,6 @@ export class ImageConverter extends ScryptedDeviceBase implements BufferConverte
const op = parseImageOp(mime.parameters);
const ffmpegInput = JSON.parse(data.toString()) as FFmpegInput;
return processImageOp(ffmpegInput, op, parseFloat(mime.parameters.get('time')), options?.sourceId, !!this.plugin.debugConsole);
return processImageOp(ffmpegInput, op, parseFloat(mime.parameters.get('time')), options?.sourceId, this.plugin.debugConsole);
}
}

View File

@@ -1,6 +1,5 @@
import sdk, { BufferConverter, Image, ImageOptions, MediaObject, MediaObjectOptions, ScryptedDeviceBase, ScryptedMimeTypes } from "@scrypted/sdk";
import type sharp from 'sharp';
import type { KernelEnum } from "sharp";
let hasLoadedSharp = false;
let sharpInstance: typeof sharp;
@@ -9,6 +8,8 @@ export function loadSharp() {
hasLoadedSharp = true;
try {
sharpInstance = require('sharp');
// not exposed by sharp but it exists.
(sharpInstance.kernel as any).linear = 'linear';
console.log('sharp loaded');
}
catch (e) {
@@ -59,8 +60,11 @@ export class VipsImage implements Image {
});
}
if (options?.resize) {
let kernel: keyof KernelEnum;
let kernel: string;
switch (options?.resize.filter) {
case 'bilinear':
kernel = 'linear';
break;
case 'lanczos':
kernel = 'lanczos2';
break;
@@ -71,13 +75,12 @@ export class VipsImage implements Image {
kernel = 'nearest';
break;
default:
kernel = 'cubic';
kernel = 'linear';
break
}
transformed.resize(typeof options.resize.width === 'number' ? Math.floor(options.resize.width) : undefined, typeof options.resize.height === 'number' ? Math.floor(options.resize.height) : undefined, {
// haven't decided if these are the correct defaults.
fit: options.resize.width && options.resize.height ? 'fill' : 'cover',
kernel: kernel,
fit: "cover",
kernel: kernel as any,
});
}
@@ -129,21 +132,13 @@ export class VipsImage implements Image {
}
}
export async function loadVipsMetadata(data: Buffer | string) {
export async function loadVipsImage(data: Buffer | string, sourceId: string) {
loadSharp();
const image = sharpInstance(data, {
failOn: 'none'
});
const metadata = await image.metadata();
return {
image,
metadata,
}
}
export async function loadVipsImage(data: Buffer | string, sourceId: string) {
loadSharp();
const { image, metadata } = await loadVipsMetadata(data);
const vipsImage = new VipsImage(image, metadata, sourceId);
return vipsImage;
}

View File

@@ -103,10 +103,7 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {
type: 'clippath',
},
});
snapshotDebouncer = createMapPromiseDebouncer<{
picture: Buffer;
pictureTime: number;
}>();
snapshotDebouncer = createMapPromiseDebouncer<Buffer>();
errorPicture: RefreshPromise<Buffer>;
timeoutPicture: RefreshPromise<Buffer>;
progressPicture: RefreshPromise<Buffer>;
@@ -206,7 +203,6 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {
const takePicture = await preparePrebufferSnapshot()
if (!takePicture)
throw e;
this.console.error('Snapshot failed, falling back to prebuffer', e);
return takePicture();
}
@@ -271,137 +267,123 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {
}
async takePictureRaw(options?: RequestPictureOptions): Promise<Buffer> {
let rawPicturePromise: Promise<{
picture: Buffer;
pictureTime: number;
}>;
let picture: Buffer;
const eventSnapshot = options?.reason === 'event';
const periodicSnapshot = options?.reason === 'periodic';
// clear out snapshots that are too old.
if (this.currentPictureTime < Date.now() - 1 * 60 * 60 * 1000)
this.currentPicture = undefined;
const allowedSnapshotStaleness = eventSnapshot ? 0 : periodicSnapshot ? 20000 : 10000;
let needRefresh = true;
if (this.currentPicture && this.currentPictureTime > Date.now() - allowedSnapshotStaleness) {
this.debugConsole?.log('Using cached snapshot for', options?.reason);
rawPicturePromise = Promise.resolve({
picture: this.currentPicture,
pictureTime: this.currentPictureTime,
});
needRefresh = this.currentPictureTime < Date.now() - allowedSnapshotStaleness / 2;
}
if (needRefresh) {
try {
const debounced = this.snapshotDebouncer({
id: options?.id,
reason: options?.reason,
}, eventSnapshot ? 0 : 10000, async () => {
}, eventSnapshot ? 0 : 2000, async () => {
const snapshotTimer = Date.now();
let picture = await this.takePictureInternal();
picture = await this.cropAndScale(picture);
this.clearCachedPictures();
const pictureTime = Date.now();
this.currentPicture = picture;
this.currentPictureTime = pictureTime;
this.currentPictureTime = Date.now();
this.lastAvailablePicture = picture;
this.debugConsole?.debug(`Periodic snapshot took ${(this.currentPictureTime - snapshotTimer) / 1000} seconds to retrieve.`)
return {
picture,
pictureTime,
};
return picture;
});
debounced.catch(() => { });
rawPicturePromise ||= debounced;
}
// periodic snapshot should get the immediately available picture.
// the debounce has already triggered a refresh for the next go around.
if (periodicSnapshot && this.currentPicture) {
const cp = this.currentPicture;
debounced.catch(() => { });
const timeout = options.timeout || 1000;
try {
picture = await timeoutPromise(timeout, debounced);
}
catch (e) {
if (e instanceof TimeoutError)
this.debugConsole?.log(`Periodic snapshot took longer than ${timeout}ms to retrieve, falling back to cached picture.`)
// prevent this from expiring
let availablePicture = this.currentPicture;
let availablePictureTime = this.currentPictureTime;
let rawPicture: Awaited<typeof rawPicturePromise>;
try {
const pictureTimeout = options?.timeout || (periodicSnapshot && availablePicture ? 1000 : 10000) || 10000;
rawPicture = await timeoutPromise(pictureTimeout, rawPicturePromise);
picture = cp;
}
}
else {
picture = await debounced;
}
}
catch (e) {
// a best effort was made to get a recent snapshot from cache or from a camera request.
// the cache request never fails, but if the camera request fails,
// it may be acceptable to use a somewhat stale snapshot, depending on the reason.
// use the fallback cached picture if it is somewhat recent.
if (this.currentPictureTime < Date.now() - 1 * 60 * 60 * 1000)
this.currentPicture = undefined;
// event snapshot requests must not use cache since they're for realtime processing by homekit and nvr.
if (eventSnapshot)
throw e;
availablePicture = this.currentPicture || availablePicture;
if (!availablePicture)
if (!this.currentPicture)
return this.createErrorImage(e);
this.console.warn('Snapshot failed, but recovered from cache', e);
rawPicture = {
picture: availablePicture,
pictureTime: availablePictureTime,
};
// gc
availablePicture = undefined;
picture = this.currentPicture;
}
const needSoftwareResize = !!(options?.picture?.width || options?.picture?.height) && this.storageSettings.values.snapshotResolution !== 'Full Resolution';
if (needSoftwareResize) {
try {
picture = await this.snapshotDebouncer({
needSoftwareResize: true,
picture: options.picture,
}, eventSnapshot ? 0 : 2000, async () => {
this.debugConsole?.log("Resizing picture from camera", options?.picture);
if (!needSoftwareResize)
return rawPicture.picture;
try {
const key = {
pictureTime: rawPicture.pictureTime,
reason: options?.reason,
needSoftwareResize: true,
picture: options.picture,
};
const ret = await this.snapshotDebouncer(key, 10000, async () => {
this.debugConsole?.log("Resizing picture from camera", key);
if (loadSharp()) {
const vips = await loadVipsImage(rawPicture.picture, this.id);
try {
const ret = await vips.toBuffer({
resize: options?.picture,
format: 'jpg',
});
return {
picture: ret,
pictureTime: rawPicture.pictureTime,
};
if (loadSharp()) {
const vips = await loadVipsImage(picture, this.id);
try {
const ret = await vips.toBuffer({
resize: options?.picture,
format: 'jpg',
});
return ret;
}
finally {
vips.close();
}
}
finally {
vips.close();
}
}
const ret = await ffmpegFilterImageBuffer(rawPicture.picture, {
console: this.debugConsole,
ffmpegPath: await mediaManager.getFFmpegPath(),
resize: options?.picture,
timeout: 10000,
// try {
// const mo = await mediaManager.createMediaObject(picture, 'image/jpeg', {
// sourceId: this.id,
// });
// const image = await mediaManager.convertMediaObject<Image>(mo, ScryptedMimeTypes.Image);
// let { width, height } = options.picture;
// if (!width)
// width = height / image.height * image.width;
// if (!height)
// height = width / image.width * image.height;
// return await image.toBuffer({
// resize: {
// width,
// height,
// },
// format: 'jpg',
// });
// }
// catch (e) {
// if (!e.message?.includes('no converter found'))
// throw e;
// }
return ffmpegFilterImageBuffer(picture, {
console: this.debugConsole,
ffmpegPath: await mediaManager.getFFmpegPath(),
resize: options?.picture,
timeout: 10000,
});
});
return {
picture: ret,
pictureTime: rawPicture.pictureTime,
};
});
return ret.picture;
}
catch (e) {
if (eventSnapshot)
throw e;
return this.createErrorImage(e);
}
catch (e) {
if (eventSnapshot)
throw e;
return this.createErrorImage(e);
}
}
return picture;
}
async takePicture(options?: RequestPictureOptions): Promise<MediaObject> {
@@ -523,7 +505,6 @@ class SnapshotMixin extends SettingsMixinDeviceBase<Camera> implements Camera {
return this.progressPicture.promise;
}
else {
this.console.error('Snapshot failed', e);
this.errorPicture = singletonPromise(this.errorPicture,
() => this.createTextErrorImage('Snapshot Failed'));
return this.errorPicture.promise;
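
The caching policy in takePictureRaw above is: serve the cached frame if it is within the allowed staleness, start a background refresh once it passes half that window, and only block when nothing usable is cached. The policy is language-agnostic; a rough asyncio sketch, in Python for brevity, with all names hypothetical:

import asyncio
import time

class SnapshotCache:
    def __init__(self, staleness=10.0):
        self.staleness = staleness
        self.picture = None
        self.picture_time = 0.0
        self.inflight = None  # coalesces concurrent refreshes, like the debouncer

    async def get(self, take_picture):
        now = time.time()
        fresh = self.picture is not None and now - self.picture_time < self.staleness
        # refresh once the cached frame is past half its allowed staleness
        if not fresh or now - self.picture_time > self.staleness / 2:
            if self.inflight is None:
                self.inflight = asyncio.ensure_future(self._refresh(take_picture))
            if not fresh:
                return await self.inflight  # nothing usable cached, must wait
        return self.picture  # serve the cached frame immediately

    async def _refresh(self, take_picture):
        # errors surface to waiters; a background refresh failure is ignored here
        try:
            self.picture = await take_picture()
            self.picture_time = time.time()
            return self.picture
        finally:
            self.inflight = None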

View File

@@ -1,6 +1,4 @@
import sdk, { FFmpegInput, RecordingStreamThumbnailOptions } from '@scrypted/sdk';
import { Console } from 'console';
import { PassThrough } from 'stream';
import url from 'url';
import type { MIMETypeParameters } from 'whatwg-mimetype';
import { FFmpegImageFilterOptions, ffmpegFilterImage, ffmpegFilterImageBuffer } from './ffmpeg-image-filter';
@@ -73,7 +71,7 @@ export function toImageOp(options: RecordingStreamThumbnailOptions) {
return ret;
}
export async function processImageOp(input: string | FFmpegInput | Buffer, op: ImageOp, time: number, sourceId: string, debug?: boolean): Promise<Buffer> {
export async function processImageOp(input: string | FFmpegInput | Buffer, op: ImageOp, time: number, sourceId: string, debugConsole: Console): Promise<Buffer> {
const { crop, resize } = op;
const { width, height, fractional } = resize || {};
const { left, top, right, bottom, fractional: cropFractional } = crop || {};
@@ -122,19 +120,8 @@ export async function processImageOp(input: string | FFmpegInput | Buffer, op: I
}
}
const out = new PassThrough();
let console = new Console(out, out);
const printConsole = () => {
if (!console)
return;
console = undefined;
const data = out.read().toString();
const deviceConsole = sdk.deviceManager.getMixinConsole(sourceId);
deviceConsole.log(data);
}
const ffmpegOpts: FFmpegImageFilterOptions = {
console,
console: debugConsole,
ffmpegPath: await sdk.mediaManager.getFFmpegPath(),
resize: width === undefined && height === undefined
? undefined
@@ -156,32 +143,22 @@ export async function processImageOp(input: string | FFmpegInput | Buffer, op: I
time,
};
try {
if (Buffer.isBuffer(input)) {
return await ffmpegFilterImageBuffer(input, ffmpegOpts);
}
const ffmpegInput: FFmpegInput = typeof input !== 'string'
? input
: {
inputArguments: [
'-i', input,
]
};
const args = [
...ffmpegInput.inputArguments,
...(ffmpegInput.h264EncoderArguments || []),
];
return await ffmpegFilterImage(args, ffmpegOpts);
}
catch (e) {
printConsole();
throw e;
}
finally {
if (debug)
printConsole();
if (Buffer.isBuffer(input)) {
return ffmpegFilterImageBuffer(input, ffmpegOpts);
}
const ffmpegInput: FFmpegInput = typeof input !== 'string'
? input
: {
inputArguments: [
'-i', input,
]
};
const args = [
...ffmpegInput.inputArguments,
...(ffmpegInput.h264EncoderArguments || []),
];
return ffmpegFilterImage(args, ffmpegOpts);
}
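
The try/catch/finally being removed above implemented a useful pattern: buffer the ffmpeg console output, and only flush it to the device console on failure (or always, when debug is set). A generic sketch of that pattern, in Python with subprocess standing in for the ffmpeg invocation (names are illustrative):

import subprocess
import sys

def run_quietly(args, debug=False):
    # capture output instead of streaming it to the console
    proc = subprocess.run(args, capture_output=True, text=True)
    if debug or proc.returncode != 0:
        # surface the buffered logs only on failure or when debugging
        sys.stderr.write(proc.stderr)
    proc.check_returncode()  # raises CalledProcessError on failure
    return proc.stdout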

View File

@@ -9,7 +9,7 @@
// "scrypted.serverRoot": "/home/pi/.scrypted",
// lxc installation
"scrypted.debugHost": "scrypted-test",
"scrypted.debugHost": "scrypted-server",
"scrypted.serverRoot": "/root/.scrypted",
// local checkout

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/tensorflow-lite",
"version": "0.1.59",
"version": "0.1.48",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/tensorflow-lite",
"version": "0.1.59",
"version": "0.1.48",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -40,7 +40,7 @@
"arm64": "3.10"
},
"win32": {
"x64": "3.9"
"x64": "-3.9"
}
},
"type": "API",
@@ -53,5 +53,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.59"
"version": "0.1.48"
}

View File

@@ -1 +0,0 @@
../../openvino/src/common

View File

@@ -1,9 +1,9 @@
from __future__ import annotations
import asyncio
import concurrent.futures
import os
import re
import traceback
import urllib.request
from typing import Any, List, Tuple
@@ -12,60 +12,69 @@ from PIL import Image
from scrypted_sdk.types import (ObjectDetectionResult, ObjectDetectionSession,
ObjectsDetected, Setting)
import common.colors
from detect import DetectPlugin
import traceback
from .rectangle import (Rectangle, combine_rect, from_bounding_box,
intersect_area, intersect_rect, to_bounding_box)
# vips is already multithreaded, but its blocking work needs to be kicked off of the python asyncio thread.
toThreadExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="image")
async def to_thread(f):
loop = asyncio.get_running_loop()
return await loop.run_in_executor(toThreadExecutor, f)
async def ensureRGBData(data: bytes, size: Tuple[int, int], format: str):
if format != 'rgba':
return Image.frombuffer('RGB', size, data)
def convert():
rgba = Image.frombuffer('RGBA', size, data)
try:
return rgba.convert('RGB')
finally:
rgba.close()
return await to_thread(convert)
def parse_label_contents(contents: str):
lines = contents.splitlines()
ret = {}
for row_number, content in enumerate(lines):
pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
if len(pair) == 2 and pair[0].strip().isdigit():
ret[int(pair[0])] = pair[1].strip()
else:
ret[row_number] = content.strip()
return ret
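
parse_label_contents accepts both indexed and plain label files; for example, given the function as defined above:

print(parse_label_contents("0  person\n1  bicycle"))  # {0: 'person', 1: 'bicycle'}
print(parse_label_contents("person\nbicycle"))        # {0: 'person', 1: 'bicycle'}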
class Prediction:
def __init__(self, id: int, score: float, bbox: Tuple[float, float, float, float], embedding: str = None):
def __init__(self, id: int, score: float, bbox: Tuple[float, float, float, float]):
self.id = id
self.score = score
self.bbox = bbox
self.embedding = embedding
class PredictPlugin(DetectPlugin):
class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter):
labels: dict
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
# periodic restart of main plugin because there seem to be leaks in the tflite or coral API.
if not nativeId:
loop = asyncio.get_event_loop()
loop.call_later(4 * 60 * 60, lambda: self.requestRestart())
self.batch: List[Tuple[Any, asyncio.Future]] = []
self.batching = 0
self.batch_flush = None
# periodic restart because there seem to be leaks in the tflite or coral API.
loop = asyncio.get_event_loop()
loop.call_later(4 * 60 * 60, lambda: self.requestRestart())
def downloadFile(self, url: str, filename: str):
try:
filesPath = os.path.join(os.environ['SCRYPTED_PLUGIN_VOLUME'], 'files')
fullpath = os.path.join(filesPath, filename)
if os.path.isfile(fullpath):
return fullpath
tmp = fullpath + '.tmp'
print("Creating directory for", tmp)
os.makedirs(os.path.dirname(fullpath), exist_ok=True)
print("Downloading", url)
response = urllib.request.urlopen(url)
if response.getcode() < 200 or response.getcode() >= 300:
raise Exception(f"Error downloading")
read = 0
with open(tmp, "wb") as f:
while True:
data = response.read(1024 * 1024)
if not data:
break
read += len(data)
print("Downloaded", read, "bytes")
f.write(data)
os.rename(tmp, fullpath)
filesPath = os.path.join(os.environ['SCRYPTED_PLUGIN_VOLUME'], 'files')
fullpath = os.path.join(filesPath, filename)
if os.path.isfile(fullpath):
return fullpath
except:
print("Error downloading", url)
import traceback
traceback.print_exc()
raise
os.makedirs(os.path.dirname(fullpath), exist_ok=True)
tmp = fullpath + '.tmp'
urllib.request.urlretrieve(url, tmp)
os.rename(tmp, fullpath)
return fullpath
def getClasses(self) -> list[str]:
return list(self.labels.values())
@@ -99,8 +108,6 @@ class PredictPlugin(DetectPlugin):
obj.bbox.xmin, obj.bbox.ymin, obj.bbox.xmax - obj.bbox.xmin, obj.bbox.ymax - obj.bbox.ymin)
detection['className'] = className
detection['score'] = obj.score
if hasattr(obj, 'embedding') and obj.embedding is not None:
detection['embedding'] = obj.embedding
detections.append(detection)
if convert_to_src_size:
@@ -130,41 +137,6 @@ class PredictPlugin(DetectPlugin):
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
pass
async def detect_batch(self, inputs: List[Any]) -> List[Any]:
pass
async def run_batch(self):
batch = self.batch
self.batch = []
self.batching = 0
if len(batch):
inputs = [x[0] for x in batch]
try:
results = await self.detect_batch(inputs)
for i, result in enumerate(results):
batch[i][1].set_result(result)
except Exception as e:
for _, future in batch:
future.set_exception(e)
async def flush_batch(self):
self.batch_flush = None
await self.run_batch()
async def queue_batch(self, input: Any) -> List[Any]:
future = asyncio.Future(loop = asyncio.get_event_loop())
self.batch.append((input, future))
if self.batching:
self.batching = self.batching - 1
if self.batching:
# if there is any sort of error or backlog, flush the batch after a short delay so queued futures don't hang.
if not self.batch_flush:
self.batch_flush = self.loop.call_later(.5, lambda: asyncio.ensure_future(self.flush_batch()))
return await future
await self.run_batch()
return await future
async def safe_detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
try:
f = self.detect_once(input, settings, src_size, cvss)
@@ -179,42 +151,26 @@ class PredictPlugin(DetectPlugin):
async def run_detection_image(self, image: scrypted_sdk.Image, detection_session: ObjectDetectionSession) -> ObjectsDetected:
settings = detection_session and detection_session.get('settings')
batch = (detection_session and detection_session.get('batch')) or 0
self.batching += batch
iw, ih = image.width, image.height
w, h = self.get_input_size()
if w is None or h is None:
resize = None
w = image.width
h = image.height
def cvss(point):
return point
else:
resize = None
xs = w / iw
ys = h / ih
def cvss(point):
return point[0] / xs, point[1] / ys
resize = None
xs = w / iw
ys = h / ih
def cvss(point):
return point[0] / xs, point[1] / ys
if iw != w or ih != h:
resize = {
'width': w,
'height': h,
}
if iw != w or ih != h:
resize = {
'width': w,
'height': h,
}
format = image.format or self.get_input_format()
b = await image.toBuffer({
'resize': resize,
'format': format,
'format': image.format or 'rgb',
})
if self.get_input_format() == 'rgb':
data = await common.colors.ensureRGBData(b, (w, h), format)
elif self.get_input_format() == 'rgba':
data = await common.colors.ensureRGBAData(b, (w, h), format)
data = await ensureRGBData(b, (w, h), image.format)
try:
ret = await self.safe_detect_once(data, settings, (iw, ih), cvss)
return ret
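
The batching helpers removed above implement future-based micro-batching: per-frame calls queue an input and await a future, and a short timer flushes the queue into a single detect_batch call. A self-contained sketch of the same pattern, with class and parameter names hypothetical:

import asyncio

class MicroBatcher:
    def __init__(self, run_batch, flush_delay=0.5):
        self.run_batch = run_batch   # async callable: list of inputs -> list of results
        self.flush_delay = flush_delay
        self.pending = []            # (input, future) pairs

    async def _flush(self):
        batch, self.pending = self.pending, []
        if not batch:
            return
        try:
            results = await self.run_batch([item for item, _ in batch])
            for (_, future), result in zip(batch, results):
                future.set_result(result)
        except Exception as e:
            # fail every queued caller; none of their futures resolved
            for _, future in batch:
                future.set_exception(e)

    async def submit(self, item):
        future = asyncio.get_running_loop().create_future()
        self.pending.append((item, future))
        if len(self.pending) == 1:
            # the first queued item arms a flush timer to bound latency
            asyncio.get_running_loop().call_later(
                self.flush_delay, lambda: asyncio.ensure_future(self._flush()))
        return await future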

View File

@@ -1,230 +0,0 @@
from __future__ import annotations
import asyncio
from asyncio import Future
import base64
import concurrent.futures
import os
from typing import Any, Tuple, List
import numpy as np
# import Quartz
import scrypted_sdk
# from Foundation import NSData, NSMakeSize
from PIL import Image
from scrypted_sdk import (
Setting,
SettingValue,
ObjectDetectionSession,
ObjectsDetected,
ObjectDetectionResult,
)
import traceback
# import Vision
from predict import PredictPlugin
from common import yolo
from common.text import prepare_text_result, process_text_result
def euclidean_distance(arr1, arr2):
return np.linalg.norm(arr1 - arr2)
def cosine_similarity(vector_a, vector_b):
dot_product = np.dot(vector_a, vector_b)
norm_a = np.linalg.norm(vector_a)
norm_b = np.linalg.norm(vector_b)
similarity = dot_product / (norm_a * norm_b)
return similarity
predictExecutor = concurrent.futures.ThreadPoolExecutor(1, "Recognize")
class RecognizeDetection(PredictPlugin):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
self.inputheight = 640
self.inputwidth = 640
self.labels = {
0: "face",
1: "plate",
2: "text",
}
self.loop = asyncio.get_event_loop()
self.minThreshold = 0.7
self.detectModel = self.downloadModel("scrypted_yolov9c_flt")
self.textModel = self.downloadModel("vgg_english_g2")
self.faceModel = self.downloadModel("inception_resnet_v1")
def downloadModel(self, model: str):
pass
async def getSettings(self) -> list[Setting]:
pass
async def putSetting(self, key: str, value: SettingValue):
self.storage.setItem(key, value)
await self.onDeviceEvent(scrypted_sdk.ScryptedInterface.Settings.value, None)
await scrypted_sdk.deviceManager.requestRestart()
# width, height, channels
def get_input_details(self) -> Tuple[int, int, int]:
return (self.inputwidth, self.inputheight, 3)
def get_input_size(self) -> Tuple[float, float]:
return (self.inputwidth, self.inputheight)
def get_input_format(self) -> str:
return "rgb"
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
results = await asyncio.get_event_loop().run_in_executor(
predictExecutor, lambda: self.predictDetectModel(input)
)
objs = yolo.parse_yolov9(results)
ret = self.create_detection_result(objs, src_size, cvss)
return ret
async def setEmbedding(self, d: ObjectDetectionResult, image: scrypted_sdk.Image):
try:
l, t, w, h = d["boundingBox"]
face = await image.toBuffer(
{
"crop": {
"left": l,
"top": t,
"width": w,
"height": h,
},
"resize": {
"width": 160,
"height": 160,
},
"format": "rgb",
}
)
faceImage = Image.frombuffer("RGB", (160, 160), face)
image_tensor = np.array(faceImage).astype(np.float32).transpose([2, 0, 1])
processed_tensor = (image_tensor - 127.5) / 128.0
processed_tensor = np.expand_dims(processed_tensor, axis=0)
output = await asyncio.get_event_loop().run_in_executor(
predictExecutor,
lambda: self.predictFaceModel(processed_tensor)
)
b = output.tobytes()
embedding = base64.b64encode(b).decode("utf-8")
d["embedding"] = embedding
except Exception as e:
traceback.print_exc()
pass
def predictTextModel(self, input):
pass
def predictDetectModel(self, input):
pass
def predictFaceModel(self, input):
pass
async def setLabel(self, d: ObjectDetectionResult, image: scrypted_sdk.Image):
try:
image_tensor = await prepare_text_result(d, image)
preds = await asyncio.get_event_loop().run_in_executor(
predictExecutor,
lambda: self.predictTextModel(image_tensor),
)
d['label'] = process_text_result(preds)
except Exception as e:
traceback.print_exc()
pass
async def run_detection_image(
self, image: scrypted_sdk.Image, detection_session: ObjectDetectionSession
) -> ObjectsDetected:
ret = await super().run_detection_image(image, detection_session)
detections = ret["detections"]
# non max suppression on detections
for i in range(len(detections)):
d1 = detections[i]
if d1["score"] < self.minThreshold:
continue
for j in range(i + 1, len(detections)):
d2 = detections[j]
if d2["score"] < self.minThreshold:
continue
if d1["className"] != d2["className"]:
continue
l1, t1, w1, h1 = d1["boundingBox"]
l2, t2, w2, h2 = d2["boundingBox"]
r1 = l1 + w1
b1 = t1 + h1
r2 = l2 + w2
b2 = t2 + h2
left = max(l1, l2)
top = max(t1, t2)
right = min(r1, r2)
bottom = min(b1, b2)
if left < right and top < bottom:
area1 = (r1 - l1) * (b1 - t1)
area2 = (r2 - l2) * (b2 - t2)
intersect = (right - left) * (bottom - top)
iou = intersect / (area1 + area2 - intersect)
if iou > 0.5:
if d1["score"] > d2["score"]:
d2["score"] = 0
else:
d1["score"] = 0
# remove anything with score 0
ret["detections"] = [d for d in detections if d["score"] >= self.minThreshold]
futures: List[Future] = []
for d in ret["detections"]:
if d["className"] == "face":
futures.append(asyncio.ensure_future(self.setEmbedding(d, image)))
elif d["className"] == "plate":
futures.append(asyncio.ensure_future(self.setLabel(d, image)))
if len(futures):
await asyncio.wait(futures)
last = None
for d in ret['detections']:
if d["className"] != "face":
continue
check = d.get("embedding")
if check is None:
continue
# decode base64 string check
embedding = base64.b64decode(check)
embedding = np.frombuffer(embedding, dtype=np.float32)
if last is None:
last = embedding
continue
# convert to numpy float32 arrays
similarity = cosine_similarity(last, embedding)
print('similarity', similarity)
last = embedding
return ret
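
The pairwise loop in run_detection_image above is plain IoU-based non-max suppression over (left, top, width, height) boxes. A compact standalone version of the same computation:

def iou(b1, b2):
    # boxes are (left, top, width, height)
    l1, t1, w1, h1 = b1
    l2, t2, w2, h2 = b2
    left, top = max(l1, l2), max(t1, t2)
    right, bottom = min(l1 + w1, l2 + w2), min(t1 + h1, t2 + h2)
    if right <= left or bottom <= top:
        return 0.0  # no overlap
    intersect = (right - left) * (bottom - top)
    return intersect / (w1 * h1 + w2 * h2 - intersect)

def nms(detections, iou_threshold=0.5):
    # detections: dicts with 'boundingBox', 'score', 'className',
    # matching the ObjectDetectionResult shape used above
    keep = sorted(detections, key=lambda d: d['score'], reverse=True)
    out = []
    for d in keep:
        if all(d['className'] != k['className']
               or iou(d['boundingBox'], k['boundingBox']) <= iou_threshold
               for k in out):
            out.append(d)
    return out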

View File

@@ -5,7 +5,7 @@ import json
from PIL import Image
from pycoral.adapters import detect
from .tflite_common import *
from .common import *
loaded_py_coral = False
try:
@@ -26,28 +26,12 @@ import scrypted_sdk
import tflite_runtime.interpreter as tflite
from scrypted_sdk.types import Setting, SettingValue
from common import yolo
import yolo
from predict import PredictPlugin
availableModels = [
"Default",
"ssd_mobilenet_v2_coco_quant_postprocess",
"tf2_ssd_mobilenet_v2_coco17_ptq",
"ssdlite_mobiledet_coco_qat_postprocess",
"scrypted_yolov6n_320",
"scrypted_yolov6s_320",
"scrypted_yolov9c_320",
"scrypted_yolov8n_320",
"efficientdet_lite0_320_ptq",
"efficientdet_lite1_384_ptq",
"efficientdet_lite2_448_ptq",
"efficientdet_lite3_512_ptq",
"efficientdet_lite3x_640_ptq",
]
def parse_label_contents(contents: str):
lines = contents.splitlines()
lines = [line for line in lines if line.strip()]
ret = {}
for row_number, content in enumerate(lines):
pair = re.split(r"[:\s]+", content.strip(), maxsplit=1)
@@ -75,39 +59,30 @@ class TensorFlowLitePlugin(
edge_tpus = None
pass
model_version = "v13"
model_version = "v12"
model = self.storage.getItem("model") or "Default"
if model not in availableModels:
self.storage.setItem("model", "Default")
model = "Default"
defaultModel = model == "Default"
branch = "main"
labelsFile = None
def configureModel():
nonlocal labelsFile
nonlocal model
if defaultModel:
if edge_tpus and next(
(obj for obj in edge_tpus if obj["type"] == "usb"), None
):
model = "ssdlite_mobiledet_coco_qat_postprocess"
if edge_tpus and next((obj for obj in edge_tpus if obj['type'] == 'usb'), None):
model = "yolov8n_full_integer_quant_320"
# this model seems completely wacky with lots of false positives.
# might be broken?
# model = "ssdlite_mobiledet_coco_qat_postprocess"
else:
model = "efficientdet_lite0_320_ptq"
self.yolo = "yolo" in model
self.yolov9 = "yolov9" in model
self.scrypted_model = "scrypted" in model
self.yolov8 = "yolov8" in model
print(f"model: {model}")
print(f'model: {model}')
if self.scrypted_model:
labelsFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/tflite-models/{branch}/scrypted_labels.txt",
f"{model_version}/scrypted_labels.txt",
)
elif self.yolo:
if self.yolo:
labelsFile = self.downloadFile(
f"https://raw.githubusercontent.com/koush/tflite-models/{branch}/coco_80cl.txt",
f"{model_version}/coco_80cl.txt",
@@ -125,10 +100,9 @@ class TensorFlowLitePlugin(
self.interpreter_count = 0
def downloadModel():
tflite_model = "best_full_integer_quant" if self.scrypted_model else model
return self.downloadFile(
f"https://github.com/koush/tflite-models/raw/{branch}/{model}/{tflite_model}{suffix}.tflite",
f"{model_version}/{tflite_model}{suffix}.tflite",
f"https://github.com/koush/tflite-models/raw/{branch}/{model}/{model}{suffix}.tflite",
f"{model_version}/{model}{suffix}.tflite",
)
try:
@@ -194,7 +168,19 @@ class TensorFlowLitePlugin(
"key": "model",
"title": "Model",
"description": "The detection model used to find objects.",
"choices": availableModels,
"choices": [
"Default",
"ssd_mobilenet_v2_coco_quant_postprocess",
"tf2_ssd_mobilenet_v2_coco17_ptq",
"ssdlite_mobiledet_coco_qat_postprocess",
"yolov8n_full_integer_quant",
"yolov8n_full_integer_quant_320",
"efficientdet_lite0_320_ptq",
"efficientdet_lite1_384_ptq",
"efficientdet_lite2_448_ptq",
"efficientdet_lite3_512_ptq",
"efficientdet_lite3x_640_ptq",
],
"value": model,
},
]
@@ -211,12 +197,12 @@ class TensorFlowLitePlugin(
interpreter = self.interpreters.get()
try:
if self.yolo:
tensor_index = input_details(interpreter, "index")
tensor_index = input_details(interpreter, 'index')
im = np.stack([input])
i = interpreter.get_input_details()[0]
if i["dtype"] == np.int8:
scale, zero_point = i["quantization"]
if i['dtype'] == np.int8:
scale, zero_point = i['quantization']
if scale == 0.003986024297773838 and zero_point == -128:
# fast path for quantization 1/255 = 0.003986024297773838
im = im.view(np.int8)
@@ -231,23 +217,18 @@ class TensorFlowLitePlugin(
interpreter.invoke()
output_details = interpreter.get_output_details()
output = output_details[0]
x = interpreter.get_tensor(output["index"])
x = interpreter.get_tensor(output['index'])
input_scale = self.get_input_details()[0]
if x.dtype == np.int8:
scale, zero_point = output["quantization"]
scale, zero_point = output['quantization']
threshold = yolo.defaultThreshold / scale + zero_point
combined_scale = scale * input_scale
objs = yolo.parse_yolov9(
x[0],
threshold,
scale=lambda v: (v - zero_point) * combined_scale,
confidence_scale=lambda v: (v - zero_point) * scale,
)
objs = yolo.parse_yolov8(x[0], threshold, scale=lambda v: (v - zero_point) * combined_scale, confidence_scale=lambda v: (v - zero_point) * scale)
else:
# this code path is unused.
objs = yolo.parse_yolov9(x[0], scale=lambda v: v * input_scale)
objs = yolo.parse_yolov8(x[0], scale=lambda v: v * input_scale)
else:
tflite_common.set_input(interpreter, input)
common.set_input(interpreter, input)
interpreter.invoke()
objs = detect.get_objects(
interpreter, score_threshold=0.2, image_scale=(1, 1)
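
The quantized threshold trick above works because int8 quantization is affine: real = (q - zero_point) * scale, so a real-valued threshold t maps into the quantized domain as t / scale + zero_point, and the comparison can happen before dequantizing anything. A worked check, with illustrative quantization parameters:

import numpy as np

scale, zero_point = 0.004, -128  # illustrative quantization params
threshold = 0.2                  # confidence threshold in real space

# compare in the quantized domain to avoid dequantizing the whole tensor
q_threshold = threshold / scale + zero_point  # 0.2 / 0.004 - 128 = -78.0

q = np.array([-100, -70, -50], dtype=np.int8)
real = (q.astype(np.float32) - zero_point) * scale  # [0.112, 0.232, 0.312]
assert np.array_equal(q > q_threshold, real > threshold)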

View File

@@ -0,0 +1 @@
../../openvino/src/yolo

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/unifi-protect",
"version": "0.0.146",
"version": "0.0.145",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/unifi-protect",
"version": "0.0.146",
"version": "0.0.145",
"license": "Apache",
"dependencies": {
"@koush/unifi-protect": "file:../../external/unifi-protect",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/unifi-protect",
"version": "0.0.146",
"version": "0.0.145",
"description": "Unifi Protect Plugin for Scrypted",
"author": "Scrypted",
"license": "Apache",

View File

@@ -182,27 +182,12 @@ export class UnifiProtect extends ScryptedDeviceBase implements Settings, Device
let detections: ObjectDetectionResult[] = [];
// const event = {
// type: 'smartDetectZone',
// start: 1713211066646,
// score: 80,
// smartDetectTypes: [ 'licensePlate', 'vehicle' ],
// smartDetectEvents: [],
// metadata: { licensePlate: { name: 'ABCDEFG', confidenceLevel: 90 } },
// camera: '64b2e59f0106eb03e4001210',
// partition: null,
// user: null,
// id: '661d86bf03e69c03e408d62a',
// modelKey: 'event'
// }
if (payload.type === 'smartDetectZone' || payload.type === 'smartDetectLine') {
unifiCamera.resetDetectionTimeout();
detections = payload.smartDetectTypes.map(type => ({
className: type,
score: payload.score,
label: (payload as any).metadata?.[type]?.name,
}));
}
else {

Some files were not shown because too many files have changed in this diff.