Compare commits

..

54 Commits

Author SHA1 Message Date
Koushik Dutta
06d3c89274 prepublish 2023-03-16 23:59:10 -07:00
Koushik Dutta
e13f3eb2f1 server: add python forked processes to stats 2023-03-16 23:59:01 -07:00
Koushik Dutta
001918d613 predict: fix detections from webui 2023-03-16 23:58:45 -07:00
Koushik Dutta
c859c3aa40 detect: publish plugins with new video pipeline support 2023-03-16 23:40:33 -07:00
Koushik Dutta
2bce019677 predict: make models a separate download 2023-03-16 23:29:02 -07:00
Koushik Dutta
6ba3386157 detect: fix peer kill causing exception inside finally handler 2023-03-16 22:10:25 -07:00
Koushik Dutta
51e66d98f9 videoanalysis: changing motion detect mode should restart motion detection 2023-03-16 22:09:56 -07:00
Koushik Dutta
6484804649 server: update package lock 2023-03-16 20:37:54 -07:00
Koushik Dutta
b2a05c099d prepublish 2023-03-16 20:37:42 -07:00
Koushik Dutta
898331da4c Merge branch 'main' of github.com:koush/scrypted 2023-03-16 20:37:33 -07:00
Koushik Dutta
9044e782b2 python-codecs: add gray decoding support 2023-03-16 20:37:28 -07:00
Koushik Dutta
aedb985941 detect: support motion on new pipeline 2023-03-16 20:37:12 -07:00
Koushik Dutta
9ba22e4058 server: fix python rpc kill handling 2023-03-16 20:33:09 -07:00
Alex Leeds
ab0afb61ae ring: add video clips support (#635)
* ring: add video clips support

* fix merge
2023-03-16 18:40:36 -07:00
Alex Leeds
bf00ba0adc ring: add support for locks (#634) 2023-03-16 18:32:20 -07:00
Koushik Dutta
d564cf1b62 server: update package lock 2023-03-16 11:13:24 -07:00
Koushik Dutta
544dfb3b24 Update rtsp-proxy.ts 2023-03-16 10:40:19 -07:00
Koushik Dutta
cf9af910be rtsp: rtsp proxy example 2023-03-16 10:03:24 -07:00
Koushik Dutta
e2e65f93af prepublish 2023-03-16 09:37:34 -07:00
Koushik Dutta
b271567428 server: Fix device initialization on first report 2023-03-16 09:37:25 -07:00
Koushik Dutta
a88a295d9a server: fixup project file 2023-03-15 23:09:16 -07:00
Koushik Dutta
38ba31ca7d tensorflow-lite: use multiple tpu 2023-03-15 23:08:48 -07:00
Koushik Dutta
1c8ff2493b coreml: move prediction onto background thread 2023-03-15 23:04:45 -07:00
Koushik Dutta
5c9f62e6b6 videoanalysis: add snapshot pipeline 2023-03-15 23:04:13 -07:00
Koushik Dutta
6fd8018c52 python-codecs: fix nre 2023-03-15 23:02:50 -07:00
Koushik Dutta
d900ddf5f1 mac: fix erroneous typing installation 2023-03-15 21:54:17 -07:00
Koushik Dutta
e3a8d311ce python-codecs: add libav support 2023-03-15 20:33:44 -07:00
Koushik Dutta
8bbc3d5470 videoanalysis: generator cleanup 2023-03-15 17:18:28 -07:00
Koushik Dutta
00cf987cec videoanalysis: reimplement snapshots for new pipeline 2023-03-15 17:03:34 -07:00
Koushik Dutta
7e5dcae64a webrtc/alexa: add option to disable TURN on peers that already have externally reachable addresses 2023-03-15 10:31:25 -07:00
Koushik Dutta
cb67237d7c server: update package lock 2023-03-15 01:28:39 -07:00
Koushik Dutta
4be848c440 prepublish 2023-03-15 01:28:05 -07:00
Koushik Dutta
b33422b066 server: fix python fork hangs 2023-03-15 01:28:01 -07:00
Koushik Dutta
77418684da server: publish 2023-03-14 23:50:22 -07:00
Koushik Dutta
08cf9f7774 prepublish 2023-03-14 23:49:51 -07:00
Koushik Dutta
9f2fabf9c0 Merge branch 'main' of github.com:koush/scrypted 2023-03-14 23:47:24 -07:00
Koushik Dutta
e2e1c7be44 server: remove python log statement 2023-03-14 23:47:05 -07:00
Koushik Dutta
ba030ba197 server: fix multiprocessing blocking read on linux 2023-03-14 23:45:06 -07:00
Koushik Dutta
a4f37bdc16 snapshot: publish 2023-03-14 23:42:33 -07:00
Koushik Dutta
f6c7b00562 tensorflow-lite: fix numpy serialization issue 2023-03-14 23:41:55 -07:00
Koushik Dutta
b951614f7c Merge branch 'main' of github.com:koush/scrypted 2023-03-14 20:13:28 -07:00
Koushik Dutta
f1dfdb3494 coreml: revert tracker dependency removal 2023-03-14 20:13:22 -07:00
Nick Berardi
ffbd25b13b alexa: set screen ratio to 720p (#625) 2023-03-14 18:40:47 -07:00
Koushik Dutta
4f03fe2420 docker: fix pyvips cffi mismatch 2023-03-14 18:00:10 -07:00
Koushik Dutta
ffdb386afa mac: include libvips in installer 2023-03-14 17:25:47 -07:00
Koushik Dutta
9eeeaa79d0 docker: include libvips 2023-03-14 16:08:22 -07:00
Koushik Dutta
4163142d1e Merge branch 'main' of github.com:koush/scrypted 2023-03-14 15:45:35 -07:00
Koushik Dutta
71cddc67e0 predict: publish new pipeline support 2023-03-14 15:45:30 -07:00
Alex Leeds
2cbc4eb54f eufy: support multiple p2p streams (#624) 2023-03-14 15:26:46 -07:00
Koushik Dutta
fc94fb4221 core: republish 2023-03-14 15:21:13 -07:00
Koushik Dutta
85ed41c590 server: publish 2023-03-14 15:15:09 -07:00
Koushik Dutta
59f889a200 prepublish 2023-03-14 15:15:05 -07:00
Koushik Dutta
7dc476fe02 prepublish 2023-03-14 15:14:59 -07:00
Koushik Dutta
f5070f1ff1 server: publish 2023-03-14 15:14:41 -07:00
60 changed files with 980 additions and 3173 deletions

common/test/rtsp-proxy.ts (new file, 56 lines added)
View File

@@ -0,0 +1,56 @@
import net from 'net';
import { listenZero } from '../src/listen-cluster';
import { RtspClient, RtspServer } from '../src/rtsp-server';

async function main() {
    const server = net.createServer(async serverSocket => {
        const client = new RtspClient('rtsp://localhost:57594/911db962087f904d');
        await client.options();
        const describeResponse = await client.describe();
        const sdp = describeResponse.body.toString();
        const server = new RtspServer(serverSocket, sdp, true);
        const setupResponse = await server.handlePlayback();
        if (setupResponse !== 'play') {
            serverSocket.destroy();
            client.client.destroy();
            return;
        }
        console.log('playback handled');
        let channel = 0;
        for (const track of Object.keys(server.setupTracks)) {
            const setupTrack = server.setupTracks[track];
            await client.setup({
                // type: 'udp',
                type: 'tcp',
                port: channel,
                path: setupTrack.control,
                onRtp(rtspHeader, rtp) {
                    server.sendTrack(setupTrack.control, rtp, false);
                },
            });
            channel += 2;
        }
        await client.play();
        console.log('client playing');
        await client.readLoop();
    });

    let port: number;
    if (false) {
        port = await listenZero(server);
    }
    else {
        port = 5555;
        server.listen(5555);
    }
    console.log(`rtsp://127.0.0.1:${port}`);
}

main();
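This test file is a minimal RTSP relay: it accepts a TCP connection, runs the OPTIONS/DESCRIBE handshake against the hardcoded upstream camera URL, mirrors the SDP back to the connecting client, and forwards each track's RTP over TCP interleaved channels (channel numbers advancing by 2 per track). Assuming the upstream URL is reachable, the proxied stream should be playable with any RTSP client, for example ffplay rtsp://127.0.0.1:5555.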

View File

@@ -33,7 +33,8 @@ RUN apt-get -y install \
gcc \
libgirepository1.0-dev \
libglib2.0-dev \
pkg-config
pkg-config \
libvips
# ffmpeg
RUN apt-get -y install \
@@ -61,6 +62,9 @@ RUN apt-get -y install \
# python pip
RUN python3 -m pip install --upgrade pip
# pyvips is broken on x86 due to a libffi version mismatch
# https://stackoverflow.com/questions/62658237/it-seems-that-the-version-of-the-libffi-library-seen-at-runtime-is-different-fro
RUN pip install --force-reinstall --no-binary :all: cffi
RUN python3 -m pip install aiofiles debugpy typing_extensions typing psutil
################################################################
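Rebuilding cffi from source with --no-binary :all: is the actual fix here: a wheel-shipped cffi can be compiled against a different libffi than the one present at runtime, which is the mismatch described in the linked Stack Overflow question, and compiling it locally ties it to the same system libffi that pyvips loads.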

View File

@@ -40,13 +40,14 @@ echo "Installing Scrypted dependencies..."
RUN_IGNORE xcode-select --install
RUN brew update
RUN_IGNORE brew install node@18
# needed by scrypted-ffmpeg
RUN_IGNORE brew install sdl2
# snapshot plugin and others
RUN brew install libvips
# gstreamer plugins
RUN_IGNORE brew install gstreamer gst-plugins-base gst-plugins-good gst-plugins-bad gst-plugins-ugly
# gst python bindings
RUN_IGNORE brew install gst-python
# python image library
# todo: consider removing this
RUN_IGNORE brew install pillow
### HACK WORKAROUND
@@ -102,7 +103,11 @@ then
fi
RUN python$PYTHON_VERSION -m pip install --upgrade pip
RUN python$PYTHON_VERSION -m pip install aiofiles debugpy typing_extensions typing opencv-python psutil
if [ "$PYTHON_VERSION" != "3.10" ]
then
RUN python$PYTHON_VERSION -m pip install typing
fi
RUN python$PYTHON_VERSION -m pip install aiofiles debugpy typing_extensions opencv-python psutil
echo "Installing Scrypted Launch Agent..."

View File

@@ -30,7 +30,8 @@ RUN apt-get -y install \
gcc \
libgirepository1.0-dev \
libglib2.0-dev \
pkg-config
pkg-config \
libvips
# ffmpeg
RUN apt-get -y install \
@@ -58,6 +59,9 @@ RUN apt-get -y install \
# python pip
RUN python3 -m pip install --upgrade pip
# pyvips is broken on x86 due to a libffi version mismatch
# https://stackoverflow.com/questions/62658237/it-seems-that-the-version-of-the-libffi-library-seen-at-runtime-is-different-fro
RUN pip install --force-reinstall --no-binary :all: cffi
RUN python3 -m pip install aiofiles debugpy typing_extensions typing psutil
################################################################

File diff suppressed because it is too large.

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/alexa",
"version": "0.2.0",
"version": "0.2.3",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",
"prescrypted-setup-project": "scrypted-package-json",
@@ -39,6 +39,6 @@
},
"devDependencies": {
"@types/node": "^18.4.2",
"@scrypted/sdk": "^0.2.70"
"@scrypted/sdk": "../../sdk"
}
}

View File

@@ -17,6 +17,13 @@ export class AlexaSignalingSession implements RTCSignalingSession {
sdp: this.directive.payload.offer.value,
},
disableTrickle: true,
disableTurn: true,
// this could be a low resolution screen, no way of knowing, so never send a 1080p stream
screen: {
devicePixelRatio: 1,
width: 1280,
height: 720
}
}
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/core",
"version": "0.1.102",
"version": "0.1.103",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/core",
"version": "0.1.102",
"version": "0.1.103",
"license": "Apache-2.0",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/core",
"version": "0.1.102",
"version": "0.1.103",
"description": "Scrypted Core plugin. Provides the UI, websocket, and engine.io APIs.",
"author": "Scrypted",
"license": "Apache-2.0",

View File

@@ -1,20 +0,0 @@
#!/bin/sh
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
rm -rf all_models
mkdir -p all_models
cd all_models
wget https://github.com/koush/coreml-survival-guide/raw/master/MobileNetV2%2BSSDLite/ObjectDetection/ObjectDetection/MobileNetV2_SSDLite.mlmodel
wget https://raw.githubusercontent.com/koush/coreml-survival-guide/master/MobileNetV2%2BSSDLite/coco_labels.txt

View File

@@ -1 +0,0 @@
../all_models/MobileNetV2_SSDLite.mlmodel

View File

@@ -1 +0,0 @@
../all_models/coco_labels.txt

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/coreml",
"version": "0.0.24",
"version": "0.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/coreml",
"version": "0.0.24",
"version": "0.1.2",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -41,5 +41,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.0.24"
"version": "0.1.2"
}

View File

@@ -6,6 +6,10 @@ from predict import PredictPlugin, Prediction, Rectangle
import coremltools as ct
import os
from PIL import Image
import asyncio
import concurrent.futures
predictExecutor = concurrent.futures.ThreadPoolExecutor(2, "CoreML-Predict")
def parse_label_contents(contents: str):
lines = contents.splitlines()
@@ -25,17 +29,19 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
def __init__(self, nativeId: str | None = None):
super().__init__(MIME_TYPE, nativeId=nativeId)
modelPath = os.path.join(os.environ['SCRYPTED_PLUGIN_VOLUME'], 'zip', 'unzipped', 'fs', 'MobileNetV2_SSDLite.mlmodel')
self.model = ct.models.MLModel(modelPath)
labelsFile = self.downloadFile('https://raw.githubusercontent.com/koush/coreml-survival-guide/master/MobileNetV2%2BSSDLite/coco_labels.txt', 'coco_labels.txt')
modelFile = self.downloadFile('https://github.com/koush/coreml-survival-guide/raw/master/MobileNetV2%2BSSDLite/ObjectDetection/ObjectDetection/MobileNetV2_SSDLite.mlmodel', 'MobileNetV2_SSDLite.mlmodel')
self.model = ct.models.MLModel(modelFile)
self.modelspec = self.model.get_spec()
self.inputdesc = self.modelspec.description.input[0]
self.inputheight = self.inputdesc.type.imageType.height
self.inputwidth = self.inputdesc.type.imageType.width
labels_contents = scrypted_sdk.zip.open(
'fs/coco_labels.txt').read().decode('utf8')
labels_contents = open(labelsFile, 'r').read()
self.labels = parse_label_contents(labels_contents)
self.loop = asyncio.get_event_loop()
# width, height, channels
def get_input_details(self) -> Tuple[int, int, int]:
@@ -44,8 +50,12 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
def get_input_size(self) -> Tuple[float, float]:
return (self.inputwidth, self.inputheight)
def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
out_dict = self.model.predict({'image': input, 'confidenceThreshold': .2 })
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
# run in executor if this is the plugin loop
if asyncio.get_event_loop() is self.loop:
out_dict = await asyncio.get_event_loop().run_in_executor(predictExecutor, lambda: self.model.predict({'image': input, 'confidenceThreshold': .2 }))
else:
out_dict = self.model.predict({'image': input, 'confidenceThreshold': .2 })
coordinatesList = out_dict['coordinates']
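The hunk above is the gist of moving CoreML prediction off the event loop: when detect_once is invoked on the plugin's own asyncio loop, the blocking MLModel.predict call is handed to a small thread pool so frame callbacks stay responsive. A minimal sketch of that hand-off, with model_predict standing in for any blocking inference call (the names here are illustrative, not the plugin's API):

import asyncio
import concurrent.futures

predictExecutor = concurrent.futures.ThreadPoolExecutor(2, "Predict")

def model_predict(data):
    # stand-in for a CPU-bound, blocking call such as MLModel.predict
    return {'coordinates': []}

class Detector:
    def __init__(self):
        # remember which loop the plugin lives on
        self.loop = asyncio.get_event_loop()

    async def detect_once(self, data):
        if asyncio.get_event_loop() is self.loop:
            # on the plugin loop: push the blocking call onto a worker thread
            return await self.loop.run_in_executor(
                predictExecutor, lambda: model_predict(data))
        # some other loop or thread is driving us; blocking here is acceptable
        return model_predict(data)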

View File

@@ -3,3 +3,8 @@ Pillow>=5.4.1
PyGObject>=3.30.4
coremltools~=6.1
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
# sort_oh
scipy
filterpy
numpy

View File

@@ -33,6 +33,7 @@
}
},
"../../packages/h264-repacketizer": {
"name": "@scrypted/h264-repacketizer",
"version": "0.0.6",
"license": "ISC",
"devDependencies": {
@@ -43,7 +44,7 @@
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.2.84",
"version": "0.2.85",
"license": "ISC",
"dependencies": {
"@babel/preset-typescript": "^7.18.6",

View File

@@ -2,7 +2,7 @@ import { listenSingleRtspClient } from '@scrypted/common/src/rtsp-server';
import { addTrackControls, parseSdp } from '@scrypted/common/src/sdp-utils';
import sdk, { Battery, Camera, Device, DeviceProvider, FFmpegInput, MediaObject, MotionSensor, RequestMediaStreamOptions, RequestPictureOptions, ResponseMediaStreamOptions, ResponsePictureOptions, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, Setting, Settings, SettingValue, VideoCamera } from '@scrypted/sdk';
import { StorageSettings } from '@scrypted/sdk/storage-settings';
import eufy, { CaptchaOptions, EufySecurity } from 'eufy-security-client';
import eufy, { CaptchaOptions, EufySecurity, P2PClientProtocol, P2PConnectionType } from 'eufy-security-client';
import { startRtpForwarderProcess } from '../../webrtc/src/rtp-forwarders';
import { Deferred } from '@scrypted/common/src/deferred';
@@ -11,17 +11,14 @@ import { LocalLivestreamManager } from './stream';
const { deviceManager, mediaManager, systemManager } = sdk;
class EufyCamera extends ScryptedDeviceBase implements Camera, VideoCamera, Battery, MotionSensor {
class EufyCamera extends ScryptedDeviceBase implements VideoCamera, MotionSensor {
client: EufySecurity;
device: eufy.Camera;
livestreamManager: LocalLivestreamManager
constructor(nativeId: string, client: EufySecurity, device: eufy.Camera) {
super(nativeId);
this.client = client;
this.device = device;
this.livestreamManager = new LocalLivestreamManager(this.client, this.device, this.console);
this.batteryLevel = this.device.getBatteryValue() as number;
this.setupMotionDetection();
}
@@ -37,31 +34,6 @@ class EufyCamera extends ScryptedDeviceBase implements Camera, VideoCamera, Batt
this.device.on('radar motion detected', handle);
}
async takePicture(options?: RequestPictureOptions): Promise<MediaObject> {
// if this stream is prebuffered, its safe to use the prebuffer to generate an image
const realDevice = systemManager.getDeviceById<VideoCamera>(this.id);
try {
const msos = await realDevice.getVideoStreamOptions();
const prebuffered: RequestMediaStreamOptions = msos.find(mso => mso.prebuffer);
if (prebuffered) {
prebuffered.refresh = false;
return realDevice.getVideoStream(prebuffered);
}
} catch (e) {}
// try to fetch the cloud image if one exists
const url = this.device.getLastCameraImageURL();
if (url) {
return mediaManager.createMediaObjectFromUrl(url.toString());
}
throw new Error("snapshot unavailable");
}
getPictureOptions(): Promise<ResponsePictureOptions[]> {
return;
}
getVideoStream(options?: ResponseMediaStreamOptions): Promise<MediaObject> {
return this.createVideoStream(options);
}
@@ -80,15 +52,32 @@ class EufyCamera extends ScryptedDeviceBase implements Camera, VideoCamera, Batt
},
tool: 'scrypted',
userConfigurable: false,
}
},
{
container: 'rtsp',
id: 'p2p-low',
name: 'P2P (Low Resolution)',
video: {
codec: 'h264',
width: 1280,
height: 720,
},
audio: {
codec: 'aac',
},
tool: 'scrypted',
userConfigurable: false,
},
];
}
async createVideoStream(options?: ResponseMediaStreamOptions): Promise<MediaObject> {
const livestreamManager = new LocalLivestreamManager(options.id, this.client, this.device, this.console);
const kill = new Deferred<void>();
kill.promise.finally(() => {
this.console.log('video stream exited');
this.livestreamManager.stopLocalLiveStream();
livestreamManager.stopLocalLiveStream();
});
const rtspServer = await listenSingleRtspClient();
@@ -138,7 +127,7 @@ class EufyCamera extends ScryptedDeviceBase implements Camera, VideoCamera, Batt
await rtsp.handlePlayback();
});
const proxyStream = await this.livestreamManager.getLocalLivestream();
const proxyStream = await livestreamManager.getLocalLivestream();
proxyStream.videostream.pipe(process.cp.stdio[4] as Writable);
proxyStream.audiostream.pipe((process.cp.stdio as any)[5] as Writable);
}
@@ -240,14 +229,13 @@ class EufyPlugin extends ScryptedDeviceBase implements DeviceProvider, Settings
password: this.storageSettings.values.password,
country: this.storageSettings.values.country,
language: 'en',
p2pConnectionSetup: 2,
p2pConnectionSetup: P2PConnectionType.QUICKEST,
pollingIntervalMinutes: 10,
eventDurationSeconds: 10
}
this.client = await EufySecurity.initialize(config);
this.client.on('device added', this.deviceAdded.bind(this));
this.client.on('station added', this.stationAdded.bind(this));
this.client.on('tfa request', () => {
this.log.a('Login failed: 2FA is enabled, check your email or texts for your code, then enter it into the Two Factor Code setting to complete login.');
});
@@ -277,7 +265,6 @@ class EufyPlugin extends ScryptedDeviceBase implements DeviceProvider, Settings
const nativeId = eufyDevice.getSerial();
const interfaces = [
ScryptedInterface.Camera,
ScryptedInterface.VideoCamera
];
if (eufyDevice.hasBattery())

View File

@@ -1,7 +1,7 @@
// Based off of https://github.com/homebridge-eufy-security/plugin/blob/master/src/plugin/controller/LocalLivestreamManager.ts
import { Camera, CommandData, CommandName, CommandType, Device, DeviceType, EufySecurity, isGreaterEqualMinVersion, P2PClientProtocol, ParamType, Station, StreamMetadata, VideoCodec } from 'eufy-security-client';
import { EventEmitter, Readable } from 'stream';
import { Station, Device, StreamMetadata, Camera, EufySecurity } from 'eufy-security-client';
type StationStream = {
station: Station;
@@ -19,28 +19,39 @@ export class LocalLivestreamManager extends EventEmitter {
private livestreamStartedAt: number | null;
private livestreamIsStarting = false;
private readonly id: string;
private readonly client: EufySecurity;
private readonly device: Camera;
constructor(client: EufySecurity, device: Camera, console: Console) {
private station: Station;
private p2pSession: P2PClientProtocol;
constructor(id: string, client: EufySecurity, device: Camera, console: Console) {
super();
this.id = id;
this.console = console;
this.client = client;
this.device = device;
this.client.getStation(this.device.getStationSerial()).then( (station) => {
this.station = station;
this.p2pSession = new P2PClientProtocol(station.getRawStation(), this.client.getApi(), station.getIPAddress());
this.p2pSession.on("livestream started", (channel: number, metadata: StreamMetadata, videostream: Readable, audiostream: Readable) => {
this.onStationLivestreamStart(station, device, metadata, videostream, audiostream);
});
this.p2pSession.on("livestream stopped", (channel: number) => {
this.onStationLivestreamStop(station, device);
});
this.p2pSession.on("livestream error", (channel: number, error: Error) => {
this.stopLivestream();
});
});
this.stationStream = null;
this.livestreamStartedAt = null;
this.initialize();
this.client.on('station livestream stop', (station: Station, device: Device) => {
this.onStationLivestreamStop(station, device);
});
this.client.on('station livestream start',
(station: Station, device: Device, metadata: StreamMetadata, videostream: Readable, audiostream: Readable) => {
this.onStationLivestreamStart(station, device, metadata, videostream, audiostream);
});
}
private initialize() {
@@ -55,10 +66,10 @@ export class LocalLivestreamManager extends EventEmitter {
}
public async getLocalLivestream(): Promise<StationStream> {
this.console.debug(this.device.getName(), 'New instance requests livestream.');
this.console.debug(this.device.getName(), this.id, 'New instance requests livestream.');
if (this.stationStream) {
const runtime = (Date.now() - this.livestreamStartedAt!) / 1000;
this.console.debug(this.device.getName(), 'Using livestream that was started ' + runtime + ' seconds ago.');
this.console.debug(this.device.getName(), this.id, 'Using livestream that was started ' + runtime + ' seconds ago.');
return this.stationStream;
} else {
return await this.startAndGetLocalLiveStream();
@@ -67,17 +78,17 @@ export class LocalLivestreamManager extends EventEmitter {
private async startAndGetLocalLiveStream(): Promise<StationStream> {
return new Promise((resolve, reject) => {
this.console.debug(this.device.getName(), 'Start new station livestream (P2P Session)...');
this.console.debug(this.device.getName(), this.id, 'Start new station livestream...');
if (!this.livestreamIsStarting) { // prevent multiple stream starts from eufy station
this.livestreamIsStarting = true;
this.client.startStationLivestream(this.device.getSerial());
this.startStationLivestream();
} else {
this.console.debug(this.device.getName(), 'stream is already starting. waiting...');
this.console.debug(this.device.getName(), this.id, 'stream is already starting. waiting...');
}
this.once('livestream start', async () => {
if (this.stationStream !== null) {
this.console.debug(this.device.getName(), 'New livestream started.');
this.console.debug(this.device.getName(), this.id, 'New livestream started.');
this.livestreamIsStarting = false;
resolve(this.stationStream);
} else {
@@ -87,15 +98,102 @@ export class LocalLivestreamManager extends EventEmitter {
});
}
private async startStationLivestream(videoCodec: VideoCodec = VideoCodec.H264): Promise<void> {
const commandData: CommandData = {
name: CommandName.DeviceStartLivestream,
value: videoCodec
};
this.console.debug(this.device.getName(), this.id, `Sending start livestream command to station ${this.station.getSerial()}`);
const rsa_key = this.p2pSession.getRSAPrivateKey();
if (this.device.isSoloCameras() || this.device.getDeviceType() === DeviceType.FLOODLIGHT_CAMERA_8423 || this.device.isWiredDoorbellT8200X()) {
this.console.debug(this.device.getName(), this.id, `Using CMD_DOORBELL_SET_PAYLOAD (1) for station ${this.station.getSerial()} (main_sw_version: ${this.station.getSoftwareVersion()})`);
await this.p2pSession.sendCommandWithStringPayload({
commandType: CommandType.CMD_DOORBELL_SET_PAYLOAD,
value: JSON.stringify({
"commandType": ParamType.COMMAND_START_LIVESTREAM,
"data": {
"accountId": this.station.getRawStation().member.admin_user_id,
"encryptkey": rsa_key?.exportKey("components-public").n.slice(1).toString("hex"),
"streamtype": videoCodec
}
}),
channel: this.device.getChannel()
}, {
command: commandData
});
} else if (this.device.isWiredDoorbell() || (this.device.isFloodLight() && this.device.getDeviceType() !== DeviceType.FLOODLIGHT) || this.device.isIndoorCamera() || (this.device.getSerial().startsWith("T8420") && isGreaterEqualMinVersion("2.0.4.8", this.station.getSoftwareVersion()))) {
this.console.debug(this.device.getName(), this.id, `Using CMD_DOORBELL_SET_PAYLOAD (2) for station ${this.station.getSerial()} (main_sw_version: ${this.station.getSoftwareVersion()})`);
await this.p2pSession.sendCommandWithStringPayload({
commandType: CommandType.CMD_DOORBELL_SET_PAYLOAD,
value: JSON.stringify({
"commandType": ParamType.COMMAND_START_LIVESTREAM,
"data": {
"account_id": this.station.getRawStation().member.admin_user_id,
"encryptkey": rsa_key?.exportKey("components-public").n.slice(1).toString("hex"),
"streamtype": videoCodec
}
}),
channel: this.device.getChannel()
}, {
command: commandData
});
} else {
if ((Device.isIntegratedDeviceBySn(this.station.getSerial()) || !isGreaterEqualMinVersion("2.0.9.7", this.station.getSoftwareVersion())) && (!this.station.getSerial().startsWith("T8420") || !isGreaterEqualMinVersion("1.0.0.25", this.station.getSoftwareVersion()))) {
this.console.debug(this.device.getName(), this.id, `Using CMD_START_REALTIME_MEDIA for station ${this.station.getSerial()} (main_sw_version: ${this.station.getSoftwareVersion()})`);
await this.p2pSession.sendCommandWithInt({
commandType: CommandType.CMD_START_REALTIME_MEDIA,
value: this.device.getChannel(),
strValue: rsa_key?.exportKey("components-public").n.slice(1).toString("hex"),
channel: this.device.getChannel()
}, {
command: commandData
});
} else {
this.console.debug(this.device.getName(), this.id, `Using CMD_SET_PAYLOAD for station ${this.station.getSerial()} (main_sw_version: ${this.station.getSoftwareVersion()})`);
await this.p2pSession.sendCommandWithStringPayload({
commandType: CommandType.CMD_SET_PAYLOAD,
value: JSON.stringify({
"account_id": this.station.getRawStation().member.admin_user_id,
"cmd": CommandType.CMD_START_REALTIME_MEDIA,
"mValue3": CommandType.CMD_START_REALTIME_MEDIA,
"payload": {
"ClientOS": "Android",
"key": rsa_key?.exportKey("components-public").n.slice(1).toString("hex"),
"streamtype": videoCodec === VideoCodec.H264 ? 1 : 2,
}
}),
channel: this.device.getChannel()
}, {
command: commandData
});
}
}
}
public stopLocalLiveStream(): void {
this.console.debug(this.device.getName(), 'Stopping station livestream.');
this.client.stopStationLivestream(this.device.getSerial());
this.console.debug(this.device.getName(), this.id, 'Stopping station livestream.');
this.stopLivestream();
this.initialize();
}
private async stopLivestream(): Promise<void> {
const commandData: CommandData = {
name: CommandName.DeviceStopLivestream
};
this.console.debug(this.device.getName(), this.id, `Sending stop livestream command to station ${this.station.getSerial()}`);
await this.p2pSession.sendCommandWithInt({
commandType: CommandType.CMD_STOP_REALTIME_MEDIA,
value: this.device.getChannel(),
channel: this.device.getChannel()
}, {
command: commandData
});
}
private onStationLivestreamStop(station: Station, device: Device) {
if (device.getSerial() === this.device.getSerial()) {
this.console.info(station.getName() + ' station livestream for ' + device.getName() + ' has stopped.');
this.console.info(this.id + ' - ' + station.getName() + ' station livestream for ' + device.getName() + ' has stopped.');
this.initialize();
}
}
@@ -111,17 +209,17 @@ export class LocalLivestreamManager extends EventEmitter {
if (this.stationStream) {
const diff = (Date.now() - this.stationStream.createdAt) / 1000;
if (diff < 5) {
this.console.warn(this.device.getName(), 'Second livestream was started from station. Ignore.');
this.console.warn(this.device.getName(), this.id, 'Second livestream was started from station. Ignore.');
return;
}
}
this.initialize(); // important to prevent unwanted behaviour when the eufy station emits the 'livestream start' event multiple times
this.console.info(station.getName() + ' station livestream (P2P session) for ' + device.getName() + ' has started.');
this.console.info(this.id + ' - ' + station.getName() + ' station livestream (P2P session) for ' + device.getName() + ' has started.');
this.livestreamStartedAt = Date.now();
const createdAt = Date.now();
this.stationStream = {station, device, metadata, videostream, audiostream, createdAt};
this.console.debug(this.device.getName(), 'Stream metadata: ' + JSON.stringify(this.stationStream.metadata));
this.console.debug(this.device.getName(), this.id, 'Stream metadata: ' + JSON.stringify(this.stationStream.metadata));
this.emit('livestream start');
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/objectdetector",
"version": "0.0.102",
"version": "0.0.107",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/objectdetector",
"version": "0.0.102",
"version": "0.0.107",
"license": "Apache-2.0",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/objectdetector",
"version": "0.0.102",
"version": "0.0.107",
"description": "Scrypted Video Analysis Plugin. Installed alongside a detection service like OpenCV or TensorFlow.",
"author": "Scrypted",
"license": "Apache-2.0",
@@ -38,7 +38,10 @@
"Settings",
"MixinProvider"
],
"realfs": true
"realfs": true,
"pluginDependencies": [
"@scrypted/python-codecs"
]
},
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,4 +1,4 @@
import sdk, { VideoFrameGenerator, Camera, DeviceState, EventListenerRegister, MediaObject, MixinDeviceBase, MixinProvider, MotionSensor, ObjectDetection, ObjectDetectionCallbacks, ObjectDetectionModel, ObjectDetectionResult, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedNativeId, Setting, Settings, SettingValue, VideoCamera } from '@scrypted/sdk';
import sdk, { ScryptedMimeTypes, Image, VideoFrame, VideoFrameGenerator, Camera, DeviceState, EventListenerRegister, MediaObject, MixinDeviceBase, MixinProvider, MotionSensor, ObjectDetection, ObjectDetectionCallbacks, ObjectDetectionModel, ObjectDetectionResult, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedNativeId, Setting, Settings, SettingValue, VideoCamera, MediaStreamDestination } from '@scrypted/sdk';
import { StorageSettings } from '@scrypted/sdk/storage-settings';
import crypto from 'crypto';
import cloneDeep from 'lodash/cloneDeep';
@@ -50,6 +50,22 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
detections = new Map<string, MediaObject>();
cameraDevice: ScryptedDevice & Camera & VideoCamera & MotionSensor & ObjectDetector;
storageSettings = new StorageSettings(this, {
newPipeline: {
title: 'Video Pipeline',
description: 'Configure how frames are provided to the video analysis pipeline.',
onGet: async () => {
const choices = [
'Default',
...getAllDevices().filter(d => d.interfaces.includes(ScryptedInterface.VideoFrameGenerator)).map(d => d.name),
];
if (!this.hasMotionType)
choices.push('Snapshot');
return {
choices,
}
},
defaultValue: 'Default',
},
motionSensorSupplementation: {
title: 'Built-In Motion Sensor',
description: `This camera has a built in motion sensor. Using ${this.objectDetection.name} may be unnecessary and will use additional CPU. Replace will ignore the built in motion sensor. Filter will verify the motion sent by built in motion sensor. The Default is ${BUILTIN_MOTION_SENSOR_REPLACE}.`,
@@ -59,6 +75,10 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
BUILTIN_MOTION_SENSOR_REPLACE,
],
defaultValue: "Default",
onPut: () => {
this.endObjectDetection();
this.maybeStartMotionDetection();
}
},
captureMode: {
title: 'Capture Mode',
@@ -128,7 +148,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
analyzeStop = 0;
lastDetectionInput = 0;
constructor(public plugin: ObjectDetectionPlugin, mixinDevice: VideoCamera & Camera & MotionSensor & ObjectDetector & Settings, mixinDeviceInterfaces: ScryptedInterface[], mixinDeviceState: { [key: string]: any }, providerNativeId: string, public objectDetection: ObjectDetection & ScryptedDevice, modelName: string, group: string, public hasMotionType: boolean, public settings: Setting[]) {
constructor(public plugin: ObjectDetectionPlugin, mixinDevice: VideoCamera & Camera & MotionSensor & ObjectDetector & Settings, mixinDeviceInterfaces: ScryptedInterface[], mixinDeviceState: { [key: string]: any }, providerNativeId: string, public objectDetection: ObjectDetection & ScryptedDevice, public model: ObjectDetectionModel, group: string, public hasMotionType: boolean, public settings: Setting[]) {
super({
mixinDevice, mixinDeviceState,
mixinProviderNativeId: providerNativeId,
@@ -139,7 +159,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
});
this.cameraDevice = systemManager.getDeviceById<Camera & VideoCamera & MotionSensor & ObjectDetector>(this.id);
this.detectionId = modelName + '-' + this.cameraDevice.id;
this.detectionId = model.name + '-' + this.cameraDevice.id;
this.bindObjectDetection();
this.register();
@@ -157,7 +177,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
if (this.hasMotionType) {
// force a motion detection restart if it quit
if (this.motionSensorSupplementation === BUILTIN_MOTION_SENSOR_REPLACE)
await this.startVideoDetection();
await this.startStreamAnalysis();
return;
}
}, this.storageSettings.values.detectionInterval * 1000);
@@ -210,7 +230,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
return;
if (this.motionSensorSupplementation !== BUILTIN_MOTION_SENSOR_REPLACE)
return;
await this.startVideoDetection();
await this.startStreamAnalysis();
}
endObjectDetection() {
@@ -296,7 +316,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
return;
if (!this.detectorRunning)
this.console.log('built in motion sensor started motion, starting video detection.');
await this.startVideoDetection();
await this.startStreamAnalysis();
return;
}
@@ -473,22 +493,70 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
if (this.detectorRunning)
return;
const stream = await this.cameraDevice.getVideoStream({
destination: 'local-recorder',
// ask rebroadcast to mute audio, not needed.
audio: null,
});
this.detectorRunning = true;
this.analyzeStop = Date.now() + this.getDetectionDuration();
const videoFrameGenerator = this.newPipeline as VideoFrameGenerator;
const newPipeline = this.newPipeline;
let generator: () => Promise<AsyncGenerator<VideoFrame & MediaObject>>;
if (newPipeline === 'Snapshot' && !this.hasMotionType) {
const self = this;
generator = async () => (async function* gen() {
try {
while (self.detectorRunning) {
const now = Date.now();
const sleeper = async () => {
const diff = now + 1100 - Date.now();
if (diff > 0)
await sleep(diff);
};
let image: MediaObject & VideoFrame;
try {
const mo = await self.cameraDevice.takePicture({
reason: 'event',
});
image = await sdk.mediaManager.convertMediaObject(mo, ScryptedMimeTypes.Image);
}
catch (e) {
self.console.error('Video analysis snapshot failed. Will retry in a moment.');
await sleeper();
continue;
}
// self.console.log('yield')
yield image;
// self.console.log('done yield')
await sleeper();
}
}
finally {
self.console.log('Snapshot generation finished.');
}
})();
}
else {
const destination: MediaStreamDestination = this.hasMotionType ? 'low-resolution' : 'local-recorder';
const videoFrameGenerator = systemManager.getDeviceById<VideoFrameGenerator>(newPipeline);
if (!videoFrameGenerator)
throw new Error('invalid VideoFrameGenerator');
const stream = await this.cameraDevice.getVideoStream({
destination,
// ask rebroadcast to mute audio, not needed.
audio: null,
});
generator = async () => videoFrameGenerator.generateVideoFrames(stream, {
resize: this.model?.inputSize ? {
width: this.model.inputSize[0],
height: this.model.inputSize[1],
} : undefined,
format: this.model?.inputFormat,
});
}
try {
const start = Date.now();
let detections = 0;
for await (const detected
of await this.objectDetection.generateObjectDetections(await videoFrameGenerator.generateVideoFrames(stream), {
of await this.objectDetection.generateObjectDetections(await generator(), {
settings: this.getCurrentSettings(),
})) {
if (!this.detectorRunning) {
@@ -532,6 +600,9 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
// this.handleDetectionEvent(detected.detected);
}
}
catch (e) {
this.console.error('video pipeline ended with error', e);
}
finally {
this.endObjectDetection();
}
@@ -849,8 +920,18 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
}
get newPipeline() {
return this.plugin.storageSettings.values.newPipeline;
}
if (!this.plugin.storageSettings.values.newPipeline)
return;
const newPipeline = this.storageSettings.values.newPipeline;
if (!newPipeline)
return newPipeline;
if (newPipeline === 'Snapshot')
return newPipeline;
const pipelines = getAllDevices().filter(d => d.interfaces.includes(ScryptedInterface.VideoFrameGenerator));
const found = pipelines.find(p => p.name === newPipeline);
return found?.id || pipelines[0]?.id;
}
async getMixinSettings(): Promise<Setting[]> {
const settings: Setting[] = [];
@@ -872,7 +953,8 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
}
this.storageSettings.settings.motionSensorSupplementation.hide = !this.hasMotionType || !this.mixinDeviceInterfaces.includes(ScryptedInterface.MotionSensor);
this.storageSettings.settings.captureMode.hide = this.hasMotionType || !!this.newPipeline;
this.storageSettings.settings.captureMode.hide = this.hasMotionType || !!this.plugin.storageSettings.values.newPipeline;
this.storageSettings.settings.newPipeline.hide = this.hasMotionType || !this.plugin.storageSettings.values.newPipeline;
this.storageSettings.settings.detectionDuration.hide = this.hasMotionType;
this.storageSettings.settings.detectionTimeout.hide = this.hasMotionType;
this.storageSettings.settings.motionDuration.hide = !this.hasMotionType;
@@ -1123,7 +1205,7 @@ class ObjectDetectorMixin extends MixinDeviceBase<ObjectDetection> implements Mi
const settings = this.model.settings;
const ret = new ObjectDetectionMixin(this.plugin, mixinDevice, mixinDeviceInterfaces, mixinDeviceState, this.mixinProviderNativeId, objectDetection, this.model.name, group, hasMotionType, settings);
const ret = new ObjectDetectionMixin(this.plugin, mixinDevice, mixinDeviceInterfaces, mixinDeviceState, this.mixinProviderNativeId, objectDetection, this.model, group, hasMotionType, settings);
this.currentMixins.add(ret);
return ret;
}
@@ -1141,8 +1223,7 @@ class ObjectDetectionPlugin extends AutoenableMixinProvider implements Settings
newPipeline: {
title: 'New Video Pipeline',
description: 'WARNING! DO NOT ENABLE: Use the new video pipeline. Leave blank to use the legacy pipeline.',
type: 'device',
deviceFilter: `interfaces.includes('${ScryptedInterface.VideoFrameGenerator}')`,
type: 'boolean',
},
activeMotionDetections: {
title: 'Active Motion Detection Sessions',
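The new 'Snapshot' pipeline above sidesteps video decoding entirely: it polls takePicture roughly every 1.1 seconds, converts the result to an Image, quietly retries when a snapshot fails, and feeds the images into the same generateObjectDetections generator as the video paths. An illustrative Python rendering of that polling loop (the plugin itself is TypeScript; take_picture is a hypothetical stand-in):

import asyncio
import time

async def take_picture():
    # stand-in for a camera snapshot call that can fail transiently
    return b'jpeg-bytes'

async def snapshot_frames(is_running):
    while is_running():
        started = time.monotonic()
        try:
            image = await take_picture()
        except Exception:
            print('snapshot failed, will retry in a moment')
        else:
            yield image
        # pace the loop to roughly one frame every 1.1 seconds
        delay = started + 1.1 - time.monotonic()
        if delay > 0:
            await asyncio.sleep(delay)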

View File

@@ -9,3 +9,4 @@ dist/*.js
dist/*.txt
__pycache__
all_models
.venv

View File

@@ -16,6 +16,6 @@
"scrypted.pythonRemoteRoot": "${config:scrypted.serverRoot}/volume/plugin.zip",
"python.analysis.extraPaths": [
"./node_modules/@scrypted/sdk/scrypted_python"
"./node_modules/@scrypted/sdk/types/scrypted_python"
]
}

View File

@@ -1,50 +1,52 @@
{
"name": "@scrypted/opencv",
"version": "0.0.64",
"version": "0.0.66",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/opencv",
"version": "0.0.64",
"version": "0.0.66",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.1.17",
"version": "0.2.85",
"dev": true,
"license": "ISC",
"dependencies": {
"@babel/preset-typescript": "^7.16.7",
"@babel/preset-typescript": "^7.18.6",
"adm-zip": "^0.4.13",
"axios": "^0.21.4",
"babel-loader": "^8.2.3",
"babel-loader": "^9.1.0",
"babel-plugin-const-enum": "^1.1.0",
"esbuild": "^0.15.9",
"ncp": "^2.0.0",
"raw-loader": "^4.0.2",
"rimraf": "^3.0.2",
"tmp": "^0.2.1",
"webpack": "^5.74.0",
"ts-loader": "^9.4.2",
"typescript": "^4.9.4",
"webpack": "^5.75.0",
"webpack-bundle-analyzer": "^4.5.0"
},
"bin": {
"scrypted-changelog": "bin/scrypted-changelog.js",
"scrypted-debug": "bin/scrypted-debug.js",
"scrypted-deploy": "bin/scrypted-deploy.js",
"scrypted-deploy-debug": "bin/scrypted-deploy-debug.js",
"scrypted-package-json": "bin/scrypted-package-json.js",
"scrypted-readme": "bin/scrypted-readme.js",
"scrypted-setup-project": "bin/scrypted-setup-project.js",
"scrypted-webpack": "bin/scrypted-webpack.js"
},
"devDependencies": {
"@types/node": "^16.11.1",
"@types/node": "^18.11.18",
"@types/stringify-object": "^4.0.0",
"stringify-object": "^3.3.0",
"ts-node": "^10.4.0",
"typedoc": "^0.23.15"
"typedoc": "^0.23.21"
}
},
"../sdk": {
@@ -59,12 +61,12 @@
"@scrypted/sdk": {
"version": "file:../../sdk",
"requires": {
"@babel/preset-typescript": "^7.16.7",
"@types/node": "^16.11.1",
"@babel/preset-typescript": "^7.18.6",
"@types/node": "^18.11.18",
"@types/stringify-object": "^4.0.0",
"adm-zip": "^0.4.13",
"axios": "^0.21.4",
"babel-loader": "^8.2.3",
"babel-loader": "^9.1.0",
"babel-plugin-const-enum": "^1.1.0",
"esbuild": "^0.15.9",
"ncp": "^2.0.0",
@@ -72,9 +74,11 @@
"rimraf": "^3.0.2",
"stringify-object": "^3.3.0",
"tmp": "^0.2.1",
"ts-loader": "^9.4.2",
"ts-node": "^10.4.0",
"typedoc": "^0.23.15",
"webpack": "^5.74.0",
"typedoc": "^0.23.21",
"typescript": "^4.9.4",
"webpack": "^5.75.0",
"webpack-bundle-analyzer": "^4.5.0"
}
}

View File

@@ -36,5 +36,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.0.64"
"version": "0.0.66"
}

View File

@@ -3,6 +3,7 @@ from time import sleep
from detect import DetectionSession, DetectPlugin
from typing import Any, List, Tuple
import numpy as np
import asyncio
import cv2
import imutils
Gst = None
@@ -10,7 +11,7 @@ try:
from gi.repository import Gst
except:
pass
from scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected, Setting
from scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected, Setting, VideoFrame
from PIL import Image
class OpenCVDetectionSession(DetectionSession):
@@ -93,6 +94,9 @@ class OpenCVPlugin(DetectPlugin):
def get_pixel_format(self):
return self.pixelFormat
def get_input_format(self) -> str:
return 'gray'
def parse_settings(self, settings: Any):
area = defaultArea
@@ -106,7 +110,8 @@ class OpenCVPlugin(DetectPlugin):
blur = int(settings.get('blur', blur))
return area, threshold, interval, blur
def detect(self, detection_session: OpenCVDetectionSession, frame, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
def detect(self, detection_session: OpenCVDetectionSession, frame, src_size, convert_to_src_size) -> ObjectsDetected:
settings = detection_session.settings
area, threshold, interval, blur = self.parse_settings(settings)
# see get_detection_input_size on undocumented size requirements for GRAY8
@@ -119,10 +124,15 @@ class OpenCVPlugin(DetectPlugin):
detection_session.curFrame = cv2.GaussianBlur(
gray, (blur, blur), 0, dst=detection_session.curFrame)
detections: List[ObjectDetectionResult] = []
detection_result: ObjectsDetected = {}
detection_result['detections'] = detections
detection_result['inputDimensions'] = src_size
if detection_session.previous_frame is None:
detection_session.previous_frame = detection_session.curFrame
detection_session.curFrame = None
return
return detection_result
detection_session.frameDelta = cv2.absdiff(
detection_session.previous_frame, detection_session.curFrame, dst=detection_session.frameDelta)
@@ -138,10 +148,6 @@ class OpenCVPlugin(DetectPlugin):
detection_session.dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(fcontours)
detections: List[ObjectDetectionResult] = []
detection_result: ObjectsDetected = {}
detection_result['detections'] = detections
detection_result['inputDimensions'] = src_size
for c in contours:
x, y, w, h = cv2.boundingRect(c)
@@ -163,6 +169,9 @@ class OpenCVPlugin(DetectPlugin):
detections.append(detection)
return detection_result
def get_input_details(self) -> Tuple[int, int, int]:
return (300, 300, 1)
def get_detection_input_size(self, src_size):
# The initial implementation of this plugin used BGRA
@@ -197,11 +206,45 @@ class OpenCVPlugin(DetectPlugin):
detection_session.cap = None
return super().end_session(detection_session)
def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
async def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
# todo
raise Exception('can not run motion detection on image')
async def run_detection_videoframe(self, videoFrame: VideoFrame, detection_session: OpenCVDetectionSession) -> ObjectsDetected:
width = videoFrame.width
height = videoFrame.height
def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
aspectRatio = width / height
# don't bother resizing if it's already fairly small
if width <= 640 and height < 640:
scale = 1
resize = None
elif aspectRatio > 1:
scale = height / 300
resize = {
'height': 300,
'width': int(300 * aspectRatio)
}
else:
scale = width / 300
resize = {
'width': 300,
'height': int(300 / aspectRatio)
}
buffer = await videoFrame.toBuffer({
'resize': resize,
})
def convert_to_src_size(point, normalize = False):
return point[0] * scale, point[1] * scale, True
mat = np.ndarray((videoFrame.height, videoFrame.width, self.pixelFormatChannelCount), buffer=buffer, dtype=np.uint8)
detections = self.detect(
detection_session, mat, (width, height), convert_to_src_size)
return detections
async def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
if avframe.format.name != 'yuv420p' and avframe.format.name != 'yuvj420p':
mat = avframe.to_ndarray(format='gray8')
else:
@@ -209,11 +252,11 @@ class OpenCVPlugin(DetectPlugin):
detections = self.detect(
detection_session, mat, settings, src_size, convert_to_src_size)
if not detections or not len(detections['detections']):
self.detection_sleep(settings)
await self.detection_sleep(settings)
return None, None
return detections, None
def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
async def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
buf = gst_sample.get_buffer()
caps = gst_sample.get_caps()
# can't trust the width value, compute the stride
@@ -230,24 +273,24 @@ class OpenCVPlugin(DetectPlugin):
buffer=info.data,
dtype=np.uint8)
detections = self.detect(
detection_session, mat, settings, src_size, convert_to_src_size)
detection_session, mat, src_size, convert_to_src_size)
# no point in triggering empty events.
finally:
buf.unmap(info)
if not detections or not len(detections['detections']):
self.detection_sleep(settings)
await self.detection_sleep(settings)
return None, None
return detections, None
def create_detection_session(self):
return OpenCVDetectionSession()
def detection_sleep(self, settings: Any):
async def detection_sleep(self, settings: Any):
area, threshold, interval, blur = self.parse_settings(settings)
# it is safe to block here because gstreamer creates a queue thread
sleep(interval / 1000)
await asyncio.sleep(interval / 1000)
def detection_event_notified(self, settings: Any):
self.detection_sleep(settings)
return super().detection_event_notified(settings)
async def detection_event_notified(self, settings: Any):
await self.detection_sleep(settings)
return await super().detection_event_notified(settings)
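Two things are worth noting in this hunk: the blocking time.sleep between motion checks becomes await asyncio.sleep so the detection interval no longer stalls the event loop, and run_detection_videoframe resizes frames so the short side lands at 300 pixels while keeping a scale factor that maps detection coordinates back to the source resolution. A sketch of that resize bookkeeping, mirroring the branching above:

def plan_resize(width, height, target=300):
    aspect = width / height
    if width <= 640 and height < 640:
        # already fairly small, skip the resize
        return 1, None
    if aspect > 1:
        # landscape: the height becomes the short side
        return height / target, {'height': target, 'width': int(target * aspect)}
    # portrait or square: the width becomes the short side
    return width / target, {'width': target, 'height': int(target / aspect)}

scale, resize = plan_resize(1920, 1080)
# a point found in the resized frame maps back by multiplying by scale
x, y = 100 * scale, 50 * scale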

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/python-codecs",
"version": "0.0.23",
"version": "0.1.6",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/python-codecs",
"version": "0.0.23",
"version": "0.1.6",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -1,6 +1,7 @@
{
"name": "@scrypted/python-codecs",
"description": "Scrypted Python Codecs",
"version": "0.1.6",
"description": "Python Codecs for Scrypted",
"keywords": [
"scrypted",
"plugin",
@@ -24,11 +25,10 @@
"runtime": "python",
"type": "API",
"interfaces": [
"VideoFrameGenerator"
"DeviceProvider"
]
},
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.0.23"
}
}

View File

@@ -16,8 +16,12 @@ except:
class Callback:
def __init__(self, callback) -> None:
self.loop = asyncio.get_running_loop()
self.callback = callback
if callback:
self.loop = asyncio.get_running_loop()
self.callback = callback
else:
self.loop = None
self.callback = None
def createPipelineIterator(pipeline: str):
pipeline = '{pipeline} ! queue leaky=downstream max-size-buffers=0 ! appsink name=appsink emit-signals=true sync=false max-buffers=-1 drop=true'.format(pipeline=pipeline)
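The suffix appended here is the backpressure policy for every pipeline this plugin builds: sync=false stops the appsink from pacing output to the clock, and the leaky queue plus drop=true are meant to let a slow frame consumer shed buffers instead of stalling the upstream decoder, which matters for live RTSP sources that cannot be paused.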

View File

@@ -1,3 +1,4 @@
import time
from gstreamer import createPipelineIterator
import asyncio
from util import optional_chain
@@ -5,10 +6,9 @@ import scrypted_sdk
from typing import Any
from urllib.parse import urlparse
import pyvips
import threading
import traceback
import concurrent.futures
Gst = None
try:
import gi
gi.require_version('Gst', '1.0')
@@ -18,6 +18,14 @@ try:
except:
pass
av = None
try:
import av
av.logging.set_level(av.logging.PANIC)
except:
pass
# vips is already multithreaded, but needs to be kicked off the python asyncio thread.
vipsExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="vips")
async def to_thread(f):
@@ -97,20 +105,100 @@ async def createVipsMediaObject(image: VipsImage):
})
return ret
class PythonCodecs(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.VideoFrameGenerator):
class LibavGenerator(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.VideoFrameGenerator):
async def generateVideoFrames(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
worker = scrypted_sdk.fork()
forked: CodecFork = await worker.result
return await forked.generateVideoFramesLibav(mediaObject, options, filter)
class GstreamerGenerator(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.VideoFrameGenerator):
async def generateVideoFrames(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
worker = scrypted_sdk.fork()
forked: CodecFork = await worker.result
return await forked.generateVideoFramesGstreamer(mediaObject, options, filter)
class PythonCodecs(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.DeviceProvider):
def __init__(self, nativeId = None):
super().__init__(nativeId)
async def generateVideoFrames(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
worker = scrypted_sdk.fork()
forked = await worker.result
return await forked.generateVideoFrames(mediaObject, options, filter)
asyncio.ensure_future(self.initialize())
async def initialize(self):
manifest: scrypted_sdk.DeviceManifest = {
'devices': [],
}
if Gst:
gstDevice: scrypted_sdk.Device = {
'name': 'Gstreamer',
'nativeId': 'gstreamer',
'interfaces': [
scrypted_sdk.ScryptedInterface.VideoFrameGenerator.value,
],
'type': scrypted_sdk.ScryptedDeviceType.API.value,
}
manifest['devices'].append(gstDevice)
if av:
avDevice: scrypted_sdk.Device = {
'name': 'Libav',
'nativeId': 'libav',
'interfaces': [
scrypted_sdk.ScryptedInterface.VideoFrameGenerator.value,
],
'type': scrypted_sdk.ScryptedDeviceType.API.value,
}
manifest['devices'].append(avDevice)
await scrypted_sdk.deviceManager.onDevicesChanged(manifest)
def getDevice(self, nativeId: str) -> Any:
if nativeId == 'gstreamer':
return GstreamerGenerator('gstreamer')
if nativeId == 'libav':
return LibavGenerator('libav')
def create_scrypted_plugin():
return PythonCodecs()
async def generateVideoFramesLibav(mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
ffmpegInput: scrypted_sdk.FFmpegInput = await scrypted_sdk.mediaManager.convertMediaObjectToJSON(mediaObject, scrypted_sdk.ScryptedMimeTypes.FFmpegInput.value)
videosrc = ffmpegInput.get('url')
container = av.open(videosrc, options = options)
# none of this stuff seems to work. might be libav being slow with rtsp.
# container.no_buffer = True
# container.options['-analyzeduration'] = '0'
# container.options['-probesize'] = '500000'
stream = container.streams.video[0]
# stream.codec_context.thread_count = 1
# stream.codec_context.low_delay = True
# stream.codec_context.options['-analyzeduration'] = '0'
# stream.codec_context.options['-probesize'] = '500000'
async def generateVideoFrames(mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
start = 0
try:
for idx, frame in enumerate(container.decode(stream)):
now = time.time()
if not start:
start = now
elapsed = now - start
if (frame.time or 0) < elapsed - 0.500:
# print('too slow, skipping frame')
continue
# print(frame)
vips = pyvips.Image.new_from_array(frame.to_ndarray(format='rgb24'))
vipsImage = VipsImage(vips)
try:
mo = await createVipsMediaObject(VipsImage(vips))
yield mo
finally:
vipsImage.vipsImage.invalidate()
vipsImage.vipsImage = None
finally:
container.close()
async def generateVideoFramesGstreamer(mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
ffmpegInput: scrypted_sdk.FFmpegInput = await scrypted_sdk.mediaManager.convertMediaObjectToJSON(mediaObject, scrypted_sdk.ScryptedMimeTypes.FFmpegInput.value)
container = ffmpegInput.get('container', None)
videosrc = ffmpegInput.get('url')
@@ -131,7 +219,22 @@ async def generateVideoFrames(mediaObject: scrypted_sdk.MediaObject, options: sc
if videoCodec == 'h264':
videosrc += ' ! rtph264depay ! h264parse'
videosrc += ' ! decodebin ! queue leaky=downstream max-size-buffers=0 ! videoconvert ! video/x-raw,format=RGB'
videocaps = 'video/x-raw'
# if options and options.get('resize'):
# videocaps = 'videoscale ! video/x-raw,width={width},height={height}'.format(width=options['resize']['width'], height=options['resize']['height'])
format = options and options.get('format')
# I420 is a cheap way to get gray out of an h264 stream without color conversion.
if format == 'gray':
format = 'I420'
bands = 1
else:
format = 'RGB'
bands = 3
videocaps += ',format={format}'.format(format=format)
videosrc += ' ! decodebin ! queue leaky=downstream max-size-buffers=0 ! videoconvert ! ' + videocaps
gst, gen = createPipelineIterator(videosrc)
async for gstsample in gen():
@@ -144,8 +247,7 @@ async def generateVideoFrames(mediaObject: scrypted_sdk.MediaObject, options: sc
continue
try:
# pyvips.Image.new_from_memory(info.data, width, height, 3, pyvips.BandFormat.UCHAR)
vips = pyvips.Image.new_from_memory(info.data, width, height, 3, pyvips.BandFormat.UCHAR)
vips = pyvips.Image.new_from_memory(info.data, width, height, bands, pyvips.BandFormat.UCHAR)
vipsImage = VipsImage(vips)
try:
mo = await createVipsMediaObject(VipsImage(vips))
@@ -157,9 +259,18 @@ async def generateVideoFrames(mediaObject: scrypted_sdk.MediaObject, options: sc
gst_buffer.unmap(info)
class CodecFork:
async def generateVideoFrames(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
async def generateVideoFramesGstreamer(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
try:
async for data in generateVideoFrames(mediaObject, options, filter):
async for data in generateVideoFramesGstreamer(mediaObject, options, filter):
yield data
finally:
import os
os._exit(os.EX_OK)
pass
async def generateVideoFramesLibav(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
try:
async for data in generateVideoFramesLibav(mediaObject, options, filter):
yield data
finally:
import os


@@ -9,7 +9,9 @@ Do not enable prebuffer on Ring cameras and doorbells.
* The persistent live stream will drain the battery faster than it can charge.
* The persistent live stream will also count against ISP bandwidth limits.
## Supported Cameras
## Supported Devices
### Cameras
- Ring Video Doorbell Wired, Pro, Pro 2, 4, 3, 2nd Gen
- Ring Floodlight Cam Wired Plus
- Ring Floodlight Cam Wired Pro
@@ -17,6 +19,15 @@ Do not enable prebuffer on Ring cameras and doorbells.
- Ring Indoor Cam
- Ring Stick-Up Cam (Wired and Battery)
### Other Devices
- Security Panel
- Location Modes
- Contact Sensor / Retrofit Alarm Zones / Tilt Sensor
- Motion Sensor
- Flood / Freeze Sensor
- Water Sensor
- Smart Locks
## Problems and Solutions
I can see artifacts in HKSV recordings


@@ -1,17 +1,17 @@
{
"name": "@scrypted/ring",
"version": "0.0.98",
"version": "0.0.100",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/ring",
"version": "0.0.98",
"version": "0.0.100",
"dependencies": {
"@koush/ring-client-api": "file:../../external/ring-client-api",
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk",
"@types/node": "^18.14.5",
"@types/node": "^18.15.3",
"axios": "^1.3.4",
"rxjs": "^7.8.0"
},
@@ -49,7 +49,7 @@
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.2.82",
"version": "0.2.85",
"license": "ISC",
"dependencies": {
"@babel/preset-typescript": "^7.18.6",
@@ -148,9 +148,9 @@
}
},
"node_modules/@types/node": {
"version": "18.14.5",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.14.5.tgz",
"integrity": "sha512-CRT4tMK/DHYhw1fcCEBwME9CSaZNclxfzVMe7GsO6ULSwsttbj70wSiX6rZdIjGblu93sTJxLdhNIT85KKI7Qw=="
"version": "18.15.3",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.15.3.tgz",
"integrity": "sha512-p6ua9zBxz5otCmbpb5D3U4B5Nanw6Pk3PPyX05xnxbB/fRv71N7CPmORg7uAD5P70T0xmx1pzAx/FUfa5X+3cw=="
},
"node_modules/@types/responselike": {
"version": "1.0.0",


@@ -36,7 +36,7 @@
"@koush/ring-client-api": "file:../../external/ring-client-api",
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk",
"@types/node": "^18.14.5",
"@types/node": "^18.15.3",
"axios": "^1.3.4",
"rxjs": "^7.8.0"
},
@@ -44,5 +44,5 @@
"got": "11.8.6",
"socket.io-client": "^2.5.0"
},
"version": "0.0.98"
"version": "0.0.100"
}


@@ -3,15 +3,15 @@ import { RefreshPromise } from "@scrypted/common/src/promise-utils";
import { connectRTCSignalingClients } from '@scrypted/common/src/rtc-signaling';
import { RtspServer } from '@scrypted/common/src/rtsp-server';
import { addTrackControls, parseSdp, replacePorts } from '@scrypted/common/src/sdp-utils';
import sdk, { Battery, BinarySensor, Camera, Device, DeviceProvider, EntrySensor, FFmpegInput, FloodSensor, Lock, LockState, MediaObject, MediaStreamUrl, MotionSensor, OnOff, PictureOptions, RequestMediaStreamOptions, RequestPictureOptions, ResponseMediaStreamOptions, RTCAVSignalingSetup, RTCSessionControl, RTCSignalingChannel, RTCSignalingSendIceCandidate, RTCSignalingSession, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, SecuritySystem, SecuritySystemMode, Setting, Settings, SettingValue, TamperSensor, VideoCamera, VideoClip, VideoClipOptions, VideoClips } from '@scrypted/sdk';
import { StorageSettings } from '@scrypted/sdk/storage-settings';
import sdk, { Battery, BinarySensor, Camera, Device, DeviceProvider, EntrySensor, FFmpegInput, FloodSensor, MediaObject, MediaStreamUrl, MotionSensor, OnOff, PictureOptions, RequestMediaStreamOptions, RequestPictureOptions, ResponseMediaStreamOptions, RTCAVSignalingSetup, RTCSessionControl, RTCSignalingChannel, RTCSignalingSendIceCandidate, RTCSignalingSession, ScryptedDeviceBase, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, SecuritySystem, SecuritySystemMode, Setting, Settings, SettingValue, TamperSensor, VideoCamera } from '@scrypted/sdk';
import child_process, { ChildProcess } from 'child_process';
import dgram from 'dgram';
import { RtcpReceiverInfo, RtcpRrPacket } from '../../../external/werift/packages/rtp/src/rtcp/rr';
import { RtpPacket } from '../../../external/werift/packages/rtp/src/rtp/rtp';
import { ProtectionProfileAes128CmHmacSha1_80 } from '../../../external/werift/packages/rtp/src/srtp/const';
import { SrtcpSession } from '../../../external/werift/packages/rtp/src/srtp/srtcp';
import { Location, LocationMode, RingDevice, isStunMessage, RtpDescription, SipSession, BasicPeerConnection, CameraData, clientApi, generateUuid, RingBaseApi, RingRestClient, rxjs, SimpleWebRtcSession, StreamingSession, RingDeviceType, RingDeviceData } from './ring-client-api';
import { BasicPeerConnection, CameraData, clientApi, generateUuid, isStunMessage, Location, LocationMode, RingBaseApi, RingDevice, RingDeviceData, RingDeviceType, RingRestClient, RtpDescription, rxjs, SimpleWebRtcSession, SipSession, StreamingSession } from './ring-client-api';
import { encodeSrtpOptions, getPayloadType, getSequenceNumber, isRtpMessagePayloadType } from './srtp-utils';
const STREAM_TIMEOUT = 120000;
@@ -79,7 +79,7 @@ class RingCameraSiren extends ScryptedDeviceBase implements OnOff {
}
}
class RingCameraDevice extends ScryptedDeviceBase implements DeviceProvider, Camera, MotionSensor, BinarySensor, RTCSignalingChannel {
class RingCameraDevice extends ScryptedDeviceBase implements DeviceProvider, Camera, MotionSensor, BinarySensor, RTCSignalingChannel, VideoClips {
buttonTimeout: NodeJS.Timeout;
session: SipSession;
rtpDescription: RtpDescription;
@@ -89,6 +89,7 @@ class RingCameraDevice extends ScryptedDeviceBase implements DeviceProvider, Cam
currentMediaMimeType: string;
refreshTimeout: NodeJS.Timeout;
picturePromise: RefreshPromise<Buffer>;
videoClips = new Map<string, VideoClip>();
constructor(public plugin: RingPlugin, public location: RingLocationDevice, nativeId: string) {
super(nativeId);
@@ -98,7 +99,6 @@ class RingCameraDevice extends ScryptedDeviceBase implements DeviceProvider, Cam
this.batteryLevel = this.findCamera()?.batteryLevel;
}
async startIntercom(media: MediaObject): Promise<void> {
if (!this.session)
throw new Error("not in call");
@@ -659,9 +659,100 @@ class RingCameraDevice extends ScryptedDeviceBase implements DeviceProvider, Cam
siren.on = data.siren_status.seconds_remaining > 0;
}
}
async getVideoClips(options?: VideoClipOptions): Promise<VideoClip[]> {
this.videoClips = new Map<string, VideoClip>();
const response = await this.findCamera().videoSearch({
dateFrom: options.startTime,
dateTo: options.endTime,
});
return response.video_search.map((result) => {
const videoClip = {
id: result.ding_id,
startTime: result.created_at,
duration: Math.round(result.duration * 1000),
event: result.kind.toString(),
description: result.kind.toString(),
thumbnailId: result.ding_id,
resources: {
thumbnail: {
href: result.thumbnail_url
},
video: {
href: result.hq_url
}
}
}
this.videoClips.set(result.ding_id, videoClip)
return videoClip;
});
}
async getVideoClip(videoId: string): Promise<MediaObject> {
if (this.videoClips.has(videoId)) {
return mediaManager.createMediaObjectFromUrl(this.videoClips.get(videoId).resources.video.href);
}
throw new Error('Failed to get video clip.')
}
async getVideoClipThumbnail(thumbnailId: string): Promise<MediaObject> {
if (this.videoClips.has(thumbnailId)) {
return mediaManager.createMediaObjectFromUrl(this.videoClips.get(thumbnailId).resources.thumbnail.href);
}
throw new Error('Failed to get video clip thumbnail.')
}
async removeVideoClips(...videoClipIds: string[]): Promise<void> {
throw new Error('Removing video clips not supported.');
}
}
class RingLock extends ScryptedDeviceBase implements Battery, Lock {
device: RingDevice
constructor(nativeId: string, device: RingDevice) {
super(nativeId);
this.device = device;
device.onData.subscribe(async (data: RingDeviceData) => {
this.updateState(data);
});
}
async lock(): Promise<void> {
return this.device.sendCommand('lock.lock');
}
async unlock(): Promise<void> {
return this.device.sendCommand('lock.unlock');
}
updateState(data: RingDeviceData) {
this.batteryLevel = data.batteryLevel;
switch (data.locked) {
case 'locked':
this.lockState = LockState.Locked;
break;
case 'unlocked':
this.lockState = LockState.Unlocked;
break;
case 'jammed':
this.lockState = LockState.Jammed;
break;
default:
this.lockState = undefined;
}
}
}
class RingSensor extends ScryptedDeviceBase implements TamperSensor, Battery, EntrySensor, MotionSensor, FloodSensor {
constructor(nativeId: string, device: RingDevice) {
super(nativeId);
device.onData.subscribe(async (data: RingDeviceData) => {
this.updateState(data);
});
}
updateState(data: RingDeviceData) {
this.tampered = data.tamperStatus === 'tamper';
this.batteryLevel = data.batteryLevel;
@@ -673,6 +764,7 @@ class RingSensor extends ScryptedDeviceBase implements TamperSensor, Battery, En
export class RingLocationDevice extends ScryptedDeviceBase implements DeviceProvider, SecuritySystem {
devices = new Map<string, any>();
locationDevices = new Map<string, RingDevice>();
constructor(public plugin: RingPlugin, nativeId: string) {
super(nativeId);
@@ -762,11 +854,21 @@ export class RingLocationDevice extends ScryptedDeviceBase implements DeviceProv
return this.plugin.locations.find(l => l.id === this.nativeId);
}
async findRingDeviceAtLocation(id: string): Promise<RingDevice> {
const location = this.findLocation();
return (await location.getDevices()).find((x) => x.id === id);
}
async getDevice(nativeId: string) {
if (!this.devices.has(nativeId)) {
if (nativeId.endsWith('-sensor')) {
const sensor = new RingSensor(nativeId);
this.devices.set(nativeId, sensor);
const ringDevice = await this.findRingDeviceAtLocation(nativeId.replace('-sensor', ''));
const device = new RingSensor(nativeId, ringDevice);
this.devices.set(nativeId, device);
} else if (nativeId.endsWith('-lock')) {
const ringDevice = await this.findRingDeviceAtLocation(nativeId.replace('-lock', ''));
const device = new RingLock(nativeId, ringDevice);
this.devices.set(nativeId, device);
} else {
const camera = new RingCameraDevice(this.plugin, this, nativeId);
this.devices.set(nativeId, camera);
@@ -967,6 +1069,7 @@ class RingPlugin extends ScryptedDeviceBase implements DeviceProvider, Settings
interfaces.push(
ScryptedInterface.VideoCamera,
ScryptedInterface.Intercom,
ScryptedInterface.VideoClips,
);
}
if (camera.operatingOnBattery)
@@ -1028,41 +1131,52 @@ class RingPlugin extends ScryptedDeviceBase implements DeviceProvider, Settings
});
}
const sensors = (await location.getDevices()).filter(x => {
const supportedSensors = [
RingDeviceType.ContactSensor,
RingDeviceType.RetrofitZone,
RingDeviceType.TiltSensor,
RingDeviceType.MotionSensor,
RingDeviceType.FloodFreezeSensor,
RingDeviceType.WaterSensor,
]
return x.data.status !== 'disabled' && (supportedSensors.includes(x.data.deviceType))
});
for (const sensor of sensors) {
const nativeId = sensor.id.toString() + '-sensor';
const data: RingDeviceData = sensor.data;
// add location devices
const locationDevices = await location.getDevices();
for (const locationDevice of locationDevices) {
const data: RingDeviceData = locationDevice.data;
let nativeId: string;
let type: ScryptedDeviceType;
let interfaces: ScryptedInterface[] = [];
const interfaces = [ScryptedInterface.TamperSensor];
switch (data.deviceType){
if (data.status === 'disabled') {
continue;
}
switch (data.deviceType) {
case RingDeviceType.ContactSensor:
case RingDeviceType.RetrofitZone:
case RingDeviceType.TiltSensor:
interfaces.push(ScryptedInterface.EntrySensor);
nativeId = locationDevice.id.toString() + '-sensor';
type = ScryptedDeviceType.Sensor
interfaces.push(ScryptedInterface.TamperSensor, ScryptedInterface.EntrySensor);
break;
case RingDeviceType.MotionSensor:
interfaces.push(ScryptedInterface.MotionSensor);
nativeId = locationDevice.id.toString() + '-sensor';
type = ScryptedDeviceType.Sensor
interfaces.push(ScryptedInterface.TamperSensor, ScryptedInterface.MotionSensor);
break;
case RingDeviceType.FloodFreezeSensor:
case RingDeviceType.WaterSensor:
interfaces.push(ScryptedInterface.FloodSensor);
nativeId = locationDevice.id.toString() + '-sensor';
type = ScryptedDeviceType.Sensor
interfaces.push(ScryptedInterface.TamperSensor, ScryptedInterface.FloodSensor);
break;
default: break;
default:
if (/^lock($|\.)/.test(data.deviceType)) {
nativeId = locationDevice.id.toString() + '-lock';
type = ScryptedDeviceType.Lock
interfaces.push(ScryptedInterface.Lock);
break;
} else {
this.console.debug(`discovered and ignoring unsupported '${locationDevice.deviceType}' device: '${locationDevice.name}'`)
continue;
}
}
if (data.batteryStatus !== 'none')
interfaces.push(ScryptedInterface.Battery);
const device: Device = {
info: {
model: data.deviceType,
@@ -1071,22 +1185,11 @@ class RingPlugin extends ScryptedDeviceBase implements DeviceProvider, Settings
},
providerNativeId: location.id,
nativeId: nativeId,
name: sensor.name,
type: ScryptedDeviceType.Sensor,
name: locationDevice.name,
type: type,
interfaces,
};
devices.push(device);
const getScryptedDevice = async () => {
const locationDevice = await this.getDevice(location.id);
const scryptedDevice = await locationDevice?.getDevice(nativeId);
return scryptedDevice as RingSensor;
}
sensor.onData.subscribe(async (data: RingDeviceData) => {
const scryptedDevice = await getScryptedDevice();
scryptedDevice?.updateState(data)
});
}
await deviceManager.onDevicesChanged({


@@ -1,12 +1,12 @@
{
"name": "@scrypted/snapshot",
"version": "0.0.48",
"version": "0.0.49",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/snapshot",
"version": "0.0.48",
"version": "0.0.49",
"dependencies": {
"@koush/axios-digest-auth": "^0.8.5",
"@types/node": "^16.6.1",


@@ -1,6 +1,6 @@
{
"name": "@scrypted/snapshot",
"version": "0.0.48",
"version": "0.0.49",
"description": "Snapshot Plugin for Scrypted",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",


@@ -1,19 +0,0 @@
#!/bin/sh
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
mkdir -p all_models
wget https://dl.google.com/coral/canned_models/all_models.tar.gz
tar -C all_models -xvzf all_models.tar.gz
rm -f all_models.tar.gz


@@ -1 +0,0 @@
../all_models/coco_labels.txt


@@ -1 +0,0 @@
../all_models/mobilenet_ssd_v2_coco_quant_postprocess.tflite


@@ -1 +0,0 @@
../all_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite


@@ -1,12 +1,12 @@
{
"name": "@scrypted/tensorflow-lite",
"version": "0.0.110",
"version": "0.1.2",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/tensorflow-lite",
"version": "0.0.110",
"version": "0.1.2",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}


@@ -44,5 +44,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.0.110"
"version": "0.1.2"
}


@@ -122,6 +122,9 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
def get_input_details(self) -> Tuple[int, int, int]:
pass
def get_input_format(self) -> str:
pass
def getModelSettings(self, settings: Any = None) -> list[Setting]:
return []
@@ -131,6 +134,7 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
'classes': self.getClasses(),
'triggerClasses': self.getTriggerClasses(),
'inputSize': self.get_input_details(),
'inputFormat': self.get_input_format(),
'settings': [],
}
@@ -206,14 +210,14 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
def run_detection_gstsample(self, detection_session: DetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
pass
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame) -> ObjectsDetected:
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: DetectionSession) -> ObjectsDetected:
pass
def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
async def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
pil: Image.Image = avframe.to_image()
return self.run_detection_image(detection_session, pil, settings, src_size, convert_to_src_size)
return await self.run_detection_image(detection_session, pil, settings, src_size, convert_to_src_size)
def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
async def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
pass
def run_detection_crop(self, detection_session: DetectionSession, sample: Any, settings: Any, src_size, convert_to_src_size, bounding_box: Tuple[float, float, float, float]) -> ObjectsDetected:
@@ -288,17 +292,22 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
async def generateObjectDetections(self, videoFrames: Any, session: ObjectDetectionGeneratorSession = None) -> Any:
try:
videoFrames = await scrypted_sdk.sdk.connectRPCObject(videoFrames)
detection_session = self.create_detection_session()
detection_session.plugin = self
detection_session.settings = session and session.get('settings')
async for videoFrame in videoFrames:
detected = await self.run_detection_videoframe(videoFrame, session and session.get('settings'))
detected = await self.run_detection_videoframe(videoFrame, detection_session)
yield {
'__json_copy_serialize_children': True,
'detected': detected,
'videoFrame': videoFrame,
}
except:
raise
await self.detection_event_notified(detection_session.settings)
finally:
await videoFrames.aclose()
try:
await videoFrames.aclose()
except:
pass
async def detectObjects(self, mediaObject: MediaObject, session: ObjectDetectionSession = None, callbacks: ObjectDetectionCallbacks = None) -> ObjectsDetected:
is_image = mediaObject and (mediaObject.mimeType.startswith('image/') or mediaObject.mimeType.endswith('/x-raw-image'))
@@ -335,7 +344,7 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
finally:
detection_session.running = False
else:
return self.run_detection_jpeg(detection_session, bytes(await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, 'image/jpeg')), settings)
return await self.run_detection_jpeg(detection_session, bytes(await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, 'image/jpeg')), settings)
if not create:
# a detection session may have been created, but not started
@@ -456,7 +465,7 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
return ret
def detection_event_notified(self, settings: Any):
async def detection_event_notified(self, settings: Any):
pass
async def createMedia(self, data: Any) -> MediaObject:
@@ -479,7 +488,7 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
if not current_data:
raise Exception('no sample')
detection_result = self.run_detection_crop(
detection_result = await self.run_detection_crop(
detection_session, current_data, detection_session.settings, current_src_size, current_convert_to_src_size, boundingBox)
return detection_result['detections']
@@ -493,7 +502,7 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
first_frame = False
print("first frame received", detection_session.id)
detection_result, data = run_detection(
detection_result, data = await run_detection(
detection_session, sample, detection_session.settings, src_size, convert_to_src_size)
if detection_result:
detection_result['running'] = True
@@ -527,7 +536,7 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
self.invalidateMedia(detection_session, data)
# asyncio.run_coroutine_threadsafe(, loop = self.loop).result()
self.detection_event_notified(detection_session.settings)
await self.detection_event_notified(detection_session.settings)
if not detection_session or duration == None:
safe_set_result(detection_session.loop,


@@ -8,6 +8,8 @@ from typing import Any, List, Tuple, Mapping
import asyncio
import time
from .rectangle import Rectangle, intersect_area, intersect_rect, to_bounding_box, from_bounding_box, combine_rect
import urllib.request
import os
from detect import DetectionSession, DetectPlugin
@@ -126,6 +128,17 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
loop = asyncio.get_event_loop()
loop.call_later(4 * 60 * 60, lambda: self.requestRestart())
def downloadFile(self, url: str, filename: str):
filesPath = os.path.join(os.environ['SCRYPTED_PLUGIN_VOLUME'], 'files')
fullpath = os.path.join(filesPath, filename)
if os.path.isfile(fullpath):
return fullpath
os.makedirs(filesPath, exist_ok=True)
tmp = fullpath + '.tmp'
urllib.request.urlretrieve(url, tmp)
os.rename(tmp, fullpath)
return fullpath
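The temp-file-then-rename step matters: os.rename is atomic when source and destination are on the same filesystem, so an interrupted download can never leave a partial file at the final path.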
def getClasses(self) -> list[str]:
return list(self.labels.values())
@@ -250,13 +263,13 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
# print(detection_result)
return detection_result
def run_detection_jpeg(self, detection_session: PredictSession, image_bytes: bytes, settings: Any) -> ObjectsDetected:
async def run_detection_jpeg(self, detection_session: PredictSession, image_bytes: bytes, settings: Any) -> ObjectsDetected:
stream = io.BytesIO(image_bytes)
image = Image.open(stream)
if image.mode == 'RGBA':
image = image.convert('RGB')
detections, _ = self.run_detection_image(detection_session, image, settings, image.size)
detections, _ = await self.run_detection_image(detection_session, image, settings, image.size)
return detections
def get_detection_input_size(self, src_size):
@@ -266,13 +279,14 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
# this is useful for high quality thumbnails.
return (None, None)
def get_input_size(self) -> Tuple[float, float]:
def get_input_size(self) -> Tuple[int, int]:
pass
def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
pass
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, settings: Any) -> ObjectsDetected:
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: PredictSession) -> ObjectsDetected:
settings = detection_session and detection_session.settings
src_size = videoFrame.width, videoFrame.height
w, h = self.get_input_size()
iw, ih = src_size
@@ -288,7 +302,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
})
image = Image.frombuffer('RGB', (w, h), data)
try:
ret = self.detect_once(image, settings, src_size, cvss)
ret = await self.detect_once(image, settings, src_size, cvss)
return ret
finally:
image.close()
@@ -339,9 +353,9 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
def cvss2(point, normalize=False):
return point[0] / s + ow, point[1] / s + oh, True
ret1 = self.detect_once(first, settings, src_size, cvss1)
ret1 = await self.detect_once(first, settings, src_size, cvss1)
first.close()
ret2 = self.detect_once(second, settings, src_size, cvss2)
ret2 = await self.detect_once(second, settings, src_size, cvss2)
second.close()
two_intersect = intersect_rect(Rectangle(*first_crop), Rectangle(*second_crop))
@@ -374,7 +388,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
ret['detections'] = dedupe_detections(ret1['detections'] + ret2['detections'], is_same_detection=is_same_detection_middle)
return ret
def run_detection_image(self, detection_session: PredictSession, image: Image.Image, settings: Any, src_size, convert_to_src_size: Any = None, multipass_crop: Tuple[float, float, float, float] = None):
async def run_detection_image(self, detection_session: PredictSession, image: Image.Image, settings: Any, src_size, convert_to_src_size: Any = None, multipass_crop: Tuple[float, float, float, float] = None):
(w, h) = self.get_input_size() or image.size
(iw, ih) = image.size
@@ -448,7 +462,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
converted = convert_to_src_size(unscaled, normalize) if convert_to_src_size else (unscaled[0], unscaled[1], True)
return converted
ret = self.detect_once(input, settings, src_size, cvss)
ret = await self.detect_once(input, settings, src_size, cvss)
input.close()
detection_session.processed = detection_session.processed + 1
return ret, RawImage(image)
@@ -461,7 +475,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
converted = convert_to_src_size(point, normalize) if convert_to_src_size else (point[0], point[1], True)
return converted
ret = self.detect_once(image, settings, src_size, cvss)
ret = await self.detect_once(image, settings, src_size, cvss)
if detection_session:
detection_session.processed = detection_session.processed + 1
else:
@@ -483,11 +497,11 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
converted = convert_to_src_size(unscaled, normalize) if convert_to_src_size else (unscaled[0], unscaled[1], True)
return converted
ret1 = self.detect_once(first, settings, src_size, cvss1)
ret1 = await self.detect_once(first, settings, src_size, cvss1)
first.close()
if detection_session:
detection_session.processed = detection_session.processed + 1
ret2 = self.detect_once(second, settings, src_size, cvss2)
ret2 = await self.detect_once(second, settings, src_size, cvss2)
if detection_session:
detection_session.processed = detection_session.processed + 1
second.close()
@@ -576,11 +590,11 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
# print('untracked %s: %s' % (d['className'], d['score']))
def run_detection_crop(self, detection_session: DetectionSession, sample: RawImage, settings: Any, src_size, convert_to_src_size, bounding_box: Tuple[float, float, float, float]) -> ObjectsDetected:
(ret, _) = self.run_detection_image(detection_session, sample.image, settings, src_size, convert_to_src_size, bounding_box)
async def run_detection_crop(self, detection_session: DetectionSession, sample: RawImage, settings: Any, src_size, convert_to_src_size, bounding_box: Tuple[float, float, float, float]) -> ObjectsDetected:
(ret, _) = await self.run_detection_image(detection_session, sample.image, settings, src_size, convert_to_src_size, bounding_box)
return ret
def run_detection_gstsample(self, detection_session: PredictSession, gstsample, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Image.Image]:
async def run_detection_gstsample(self, detection_session: PredictSession, gstsample, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Image.Image]:
caps = gstsample.get_caps()
# can't trust the width value, compute the stride
height = caps.get_structure(0).get_value('height')
@@ -604,7 +618,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
gst_buffer.unmap(info)
try:
return self.run_detection_image(detection_session, image, settings, src_size, convert_to_src_size)
return await self.run_detection_image(detection_session, image, settings, src_size, convert_to_src_size)
except:
image.close()
traceback.print_exc()
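Per the stride comment above, the usable row width is recovered from the mapped buffer size rather than from the caps. A minimal sketch for a packed RGB buffer (illustrative names; assumes 3 bytes per pixel):
def effective_width(buffer_size: int, height: int, bytes_per_pixel: int = 3) -> int:
    # GStreamer may pad rows, so derive the stride (bytes per row) from
    # the actual buffer size, then convert it to pixels per row.
    stride = buffer_size // height
    return stride // bytes_per_pixel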


@@ -3,7 +3,6 @@ import threading
from .common import *
from PIL import Image
from pycoral.adapters import detect
from pycoral.adapters.common import input_size
loaded_py_coral = False
try:
from pycoral.utils.edgetpu import list_edge_tpus
@@ -19,6 +18,9 @@ import scrypted_sdk
from scrypted_sdk.types import Setting
from typing import Any, Tuple
from predict import PredictPlugin
import concurrent.futures
import queue
import asyncio
def parse_label_contents(contents: str):
lines = contents.splitlines()
@@ -38,9 +40,15 @@ class TensorFlowLitePlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted
def __init__(self, nativeId: str | None = None):
super().__init__(MIME_TYPE, nativeId=nativeId)
labels_contents = scrypted_sdk.zip.open(
'fs/coco_labels.txt').read().decode('utf8')
tfliteFile = self.downloadFile('https://raw.githubusercontent.com/google-coral/test_data/master/ssd_mobilenet_v2_coco_quant_postprocess.tflite', 'ssd_mobilenet_v2_coco_quant_postprocess.tflite')
edgetpuFile = self.downloadFile('https://raw.githubusercontent.com/google-coral/test_data/master/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite', 'ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite')
labelsFile = self.downloadFile('https://raw.githubusercontent.com/google-coral/test_data/master/coco_labels.txt', 'coco_labels.txt')
labels_contents = open(labelsFile, 'r').read()
self.labels = parse_label_contents(labels_contents)
self.interpreters = queue.Queue()
self.interpreter_count = 0
try:
edge_tpus = list_edge_tpus()
print('edge tpus', edge_tpus)
@@ -49,23 +57,39 @@ class TensorFlowLitePlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted
self.edge_tpu_found = str(edge_tpus)
# todo co-compile
# https://coral.ai/docs/edgetpu/compiler/#co-compiling-multiple-models
model = scrypted_sdk.zip.open(
'fs/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite').read()
# face_model = scrypted_sdk.zip.open(
# 'fs/mobilenet_ssd_v2_face_quant_postprocess.tflite').read()
self.interpreter = make_interpreter(model)
for idx, edge_tpu in enumerate(edge_tpus):
try:
interpreter = make_interpreter(edgetpuFile, ":%s" % idx)
interpreter.allocate_tensors()
_, height, width, channels = interpreter.get_input_details()[
0]['shape']
self.input_details = int(width), int(height), int(channels)
self.interpreters.put(interpreter)
self.interpreter_count = self.interpreter_count + 1
print('added tpu %s' % (edge_tpu))
except Exception as e:
print('unable to use Coral Edge TPU', e)
if not self.interpreter_count:
raise Exception('all tpus failed to load')
# self.face_interpreter = make_interpreter(face_model)
except Exception as e:
print('unable to use Coral Edge TPU', e)
self.edge_tpu_found = 'Edge TPU not found'
model = scrypted_sdk.zip.open(
'fs/mobilenet_ssd_v2_coco_quant_postprocess.tflite').read()
# face_model = scrypted_sdk.zip.open(
# 'fs/mobilenet_ssd_v2_face_quant_postprocess.tflite').read()
self.interpreter = tflite.Interpreter(model_content=model)
interpreter = tflite.Interpreter(model_path=tfliteFile)
interpreter.allocate_tensors()
_, height, width, channels = interpreter.get_input_details()[
0]['shape']
self.input_details = int(width), int(height), int(channels)
self.interpreters.put(interpreter)
self.interpreter_count = self.interpreter_count + 1
# self.face_interpreter = make_interpreter(face_model)
self.interpreter.allocate_tensors()
self.mutex = threading.Lock()
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.interpreter_count, thread_name_prefix="tflite", )
async def getSettings(self) -> list[Setting]:
ret = await super().getSettings()
@@ -83,29 +107,32 @@ class TensorFlowLitePlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted
# width, height, channels
def get_input_details(self) -> Tuple[int, int, int]:
with self.mutex:
_, height, width, channels = self.interpreter.get_input_details()[
0]['shape']
return int(width), int(height), int(channels)
return self.input_details
def get_input_size(self) -> Tuple[float, float]:
return input_size(self.interpreter)
def get_input_size(self) -> Tuple[int, int]:
return self.input_details[0:2]
def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
try:
with self.mutex:
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
def predict():
interpreter = self.interpreters.get()
try:
common.set_input(
self.interpreter, input)
interpreter, input)
scale = (1, 1)
# _, scale = common.set_resized_input(
# self.interpreter, cropped.size, lambda size: cropped.resize(size, Image.ANTIALIAS))
self.interpreter.invoke()
interpreter.invoke()
objs = detect.get_objects(
self.interpreter, score_threshold=.2, image_scale=scale)
except:
print('tensorflow-lite encountered an error while detecting. requesting plugin restart.')
self.requestRestart()
raise
interpreter, score_threshold=.2, image_scale=scale)
return objs
except:
print('tensorflow-lite encountered an error while detecting. requesting plugin restart.')
self.requestRestart()
raise
finally:
self.interpreters.put(interpreter)
objs = await asyncio.get_event_loop().run_in_executor(self.executor, predict)
allowList = settings.get('allowList', None) if settings else None
ret = self.create_detection_result(objs, src_size, allowList, cvss)
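The queue of interpreters above lets each Edge TPU run its own model instance concurrently: a worker thread borrows an interpreter, runs inference, and returns it, with the thread pool sized to the interpreter count. A minimal generic sketch of the same pattern:
import asyncio
import concurrent.futures
import queue

class InterpreterPool:
    def __init__(self, interpreters):
        self.available = queue.Queue()
        for interpreter in interpreters:
            self.available.put(interpreter)
        self.executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=self.available.qsize())

    async def invoke(self, run):
        # Borrow an interpreter, run the blocking inference off the event
        # loop, and always return the interpreter to the pool.
        def borrow_and_run():
            interpreter = self.available.get()
            try:
                return run(interpreter)
            finally:
                self.available.put(interpreter)
        return await asyncio.get_event_loop().run_in_executor(self.executor, borrow_and_run)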


@@ -1,12 +1,12 @@
{
"name": "@scrypted/tensorflow-lite",
"version": "0.1.3",
"version": "0.1.4",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/tensorflow-lite",
"version": "0.1.3",
"version": "0.1.4",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}


@@ -41,5 +41,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.3"
"version": "0.1.4"
}


@@ -1,12 +1,12 @@
{
"name": "@scrypted/webrtc",
"version": "0.1.36",
"version": "0.1.37",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/webrtc",
"version": "0.1.36",
"version": "0.1.37",
"dependencies": {
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk",


@@ -1,6 +1,6 @@
{
"name": "@scrypted/webrtc",
"version": "0.1.36",
"version": "0.1.37",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",
"prescrypted-setup-project": "scrypted-package-json",


@@ -115,7 +115,8 @@ class WebRTCMixin extends SettingsMixinDeviceBase<RTCSignalingClient & VideoCame
const device = systemManager.getDeviceById<VideoCamera & Intercom>(this.id);
const hasIntercom = this.mixinDeviceInterfaces.includes(ScryptedInterface.Intercom);
const mo = await sdk.mediaManager.createMediaObject(device, ScryptedMimeTypes.ScryptedDevice, {
const requestMediaStream: RequestMediaStream = async options => device.getVideoStream(options);
const mo = await mediaManager.createMediaObject(requestMediaStream, ScryptedMimeTypes.RequestMediaStream, {
sourceId: device.id,
});
@@ -126,7 +127,7 @@ class WebRTCMixin extends SettingsMixinDeviceBase<RTCSignalingClient & VideoCame
mo,
this.plugin.storageSettings.values.maximumCompatibilityMode,
this.plugin.getRTCConfiguration(),
await this.plugin.getWeriftConfiguration(),
await this.plugin.getWeriftConfiguration(options?.disableTurn),
);
}
@@ -409,7 +410,7 @@ export class WebRTCPlugin extends AutoenableMixinProvider implements DeviceCreat
};
}
async getWeriftConfiguration(): Promise<Partial<PeerConfig>> {
async getWeriftConfiguration(disableTurn?: boolean): Promise<Partial<PeerConfig>> {
let ret: Partial<PeerConfig>;
if (this.storageSettings.values.weriftConfiguration) {
try {
@@ -420,7 +421,7 @@ export class WebRTCPlugin extends AutoenableMixinProvider implements DeviceCreat
}
}
const iceServers = this.storageSettings.values.useTurnServer
const iceServers = this.storageSettings.values.useTurnServer && !disableTurn
? [weriftStunServer, weriftTurnServer]
: [weriftStunServer];


@@ -233,7 +233,7 @@ class HttpResponseOptions(TypedDict):
class ImageOptions(TypedDict):
crop: Any
format: Any | Any | Any
format: Any | Any | Any | Any
resize: Any
pass
@@ -486,6 +486,7 @@ class ObjectDetectionGeneratorSession(TypedDict):
class ObjectDetectionModel(TypedDict):
classes: list[str]
inputFormat: Any | Any | Any
inputSize: list[float]
name: str
settings: list[Setting]
@@ -693,7 +694,7 @@ class VideoClipOptions(TypedDict):
class VideoFrameGeneratorOptions(TypedDict):
crop: Any
format: Any | Any | Any
format: Any | Any | Any | Any
resize: Any
pass


@@ -1296,6 +1296,7 @@ export interface ObjectDetectionSession extends ObjectDetectionGeneratorSession
export interface ObjectDetectionModel extends ObjectDetectionTypes {
name: string;
inputSize?: number[];
inputFormat?: 'gray' | 'rgb' | 'rgba';
settings: Setting[];
triggerClasses?: string[];
}
@@ -1328,7 +1329,7 @@ export interface ImageOptions {
width?: number,
height?: number,
};
format?: 'rgba' | 'rgb' | 'jpg';
format?: 'gray' | 'rgba' | 'rgb' | 'jpg';
}
export interface Image {
width: number;
@@ -1942,7 +1943,14 @@ export interface RTCSignalingOptions {
*/
offer?: RTCSessionDescriptionInit;
requiresOffer?: boolean;
/**
* Disables trickle ICE. All candidates must be sent in the initial offer/answer sdp.
*/
disableTrickle?: boolean;
/**
* Disables usage of TURN servers, if this client exposes public addresses or provides its own.
*/
disableTurn?: boolean;
/**
* Hint to proxy the feed, as the target client may be inflexible.
*/


@@ -28,7 +28,7 @@
"${workspaceFolder}/**/*.js"
],
"env": {
"SCRYPTED_PYTHON_PATH": "python3.9",
"SCRYPTED_PYTHON_PATH": "python3.10",
// "SCRYPTED_SHARED_WORKER": "true",
// "SCRYPTED_DISABLE_AUTHENTICATION": "true",
// "DEBUG": "*",


@@ -1,12 +1,12 @@
{
"name": "@scrypted/server",
"version": "0.6.26",
"version": "0.7.11",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/server",
"version": "0.6.26",
"version": "0.7.11",
"license": "ISC",
"dependencies": {
"@mapbox/node-pre-gyp": "^1.0.10",


@@ -1,6 +1,6 @@
{
"name": "@scrypted/server",
"version": "0.7.5",
"version": "0.7.12",
"description": "",
"dependencies": {
"@mapbox/node-pre-gyp": "^1.0.10",


@@ -8,6 +8,7 @@ import platform
import shutil
import subprocess
import threading
import concurrent.futures
import time
import traceback
import zipfile
@@ -35,6 +36,24 @@ class SystemDeviceState(TypedDict):
value: any
class StreamPipeReader:
def __init__(self, conn: multiprocessing.connection.Connection) -> None:
self.conn = conn
self.executor = concurrent.futures.ThreadPoolExecutor()
def readBlocking(self, n):
b = bytes(0)
while len(b) < n:
self.conn.poll()
add = os.read(self.conn.fileno(), n - len(b))
if not len(add):
raise Exception('unable to read requested bytes')
b += add
return b
async def read(self, n):
return await asyncio.get_event_loop().run_in_executor(self.executor, lambda: self.readBlocking(n))
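A hypothetical framing example on top of StreamPipeReader, assuming a 4-byte big-endian length prefix (not the actual RPC wire format):
import struct

async def read_message(reader) -> bytes:
    # Read the length prefix, then exactly that many payload bytes from a
    # StreamPipeReader, all without blocking the asyncio event loop.
    (length,) = struct.unpack('!I', await reader.read(4))
    return await reader.read(length)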
class SystemManager(scrypted_python.scrypted_sdk.types.SystemManager):
def __init__(self, api: Any, systemState: Mapping[str, Mapping[str, SystemDeviceState]]) -> None:
super().__init__()
@@ -466,15 +485,24 @@ class PluginRemote:
schedule_exit_check()
async def getFork():
fd = os.dup(parent_conn.fileno())
forkPeer, readLoop = await rpc_reader.prepare_peer_readloop(self.loop, fd, fd)
reader = StreamPipeReader(parent_conn)
forkPeer, readLoop = await rpc_reader.prepare_peer_readloop(self.loop, reader = reader, writeFd = parent_conn.fileno())
forkPeer.peerName = 'thread'
async def updateStats(stats):
allMemoryStats[forkPeer] = stats
forkPeer.params['updateStats'] = updateStats
async def forkReadLoop():
try:
await readLoop()
except:
# traceback.print_exc()
print('fork read loop exited')
pass
finally:
allMemoryStats.pop(forkPeer)
parent_conn.close()
reader.executor.shutdown()
asyncio.run_coroutine_threadsafe(forkReadLoop(), loop=self.loop)
getRemote = await forkPeer.getParam('getRemote')
remote: PluginRemote = await getRemote(self.api, self.pluginId, self.hostInfo)
@@ -563,13 +591,19 @@ class PluginRemote:
async def getServicePort(self, name):
pass
async def plugin_async_main(loop: AbstractEventLoop, readFd: int, writeFd: int):
peer, readLoop = await rpc_reader.prepare_peer_readloop(loop, readFd, writeFd)
allMemoryStats = {}
async def plugin_async_main(loop: AbstractEventLoop, readFd: int = None, writeFd: int = None, reader: asyncio.StreamReader = None, writer: asyncio.StreamWriter = None):
peer, readLoop = await rpc_reader.prepare_peer_readloop(loop, readFd=readFd, writeFd=writeFd, reader=reader, writer=writer)
peer.params['print'] = print
peer.params['getRemote'] = lambda api, pluginId, hostInfo: PluginRemote(peer, api, pluginId, hostInfo, loop)
async def get_update_stats():
update_stats = await peer.getParam('updateStats')
if not update_stats:
print('host did not provide update_stats')
return
def stats_runner():
ptime = round(time.process_time() * 1000000)
@@ -584,8 +618,12 @@ async def plugin_async_main(loop: AbstractEventLoop, readFd: int, writeFd: int):
resource.RUSAGE_SELF).ru_maxrss
except:
heapTotal = 0
for _, stats in allMemoryStats.items():
ptime += stats['cpu']['user']
heapTotal += stats['memoryUsage']['heapTotal']
stats = {
'type': 'stats',
'cpu': {
'user': ptime,
'system': 0,
@@ -601,10 +639,14 @@ async def plugin_async_main(loop: AbstractEventLoop, readFd: int, writeFd: int):
asyncio.run_coroutine_threadsafe(get_update_stats(), loop)
await readLoop()
try:
await readLoop()
finally:
if reader and hasattr(reader, 'executor'):
r: StreamPipeReader = reader
r.executor.shutdown()
def main(readFd: int, writeFd: int):
def main(readFd: int = None, writeFd: int = None, reader: asyncio.StreamReader = None, writer: asyncio.StreamWriter = None):
loop = asyncio.new_event_loop()
def gc_runner():
@@ -612,10 +654,10 @@ def main(readFd: int, writeFd: int):
loop.call_later(10, gc_runner)
gc_runner()
loop.run_until_complete(plugin_async_main(loop, readFd, writeFd))
loop.run_until_complete(plugin_async_main(loop, readFd=readFd, writeFd=writeFd, reader=reader, writer=writer))
loop.close()
def plugin_main(readFd: int, writeFd: int):
def plugin_main(readFd: int = None, writeFd: int = None, reader: asyncio.StreamReader = None, writer: asyncio.StreamWriter = None):
try:
import gi
gi.require_version('Gst', '1.0')
@@ -624,17 +666,18 @@ def plugin_main(readFd: int, writeFd: int):
loop = GLib.MainLoop()
worker = threading.Thread(target=main, args=(readFd, writeFd), name="asyncio-main")
worker = threading.Thread(target=main, args=(readFd, writeFd, reader, writer), name="asyncio-main")
worker.start()
loop.run()
except:
main(readFd, writeFd)
main(readFd=readFd, writeFd=writeFd, reader=reader, writer=writer)
def plugin_fork(conn: multiprocessing.connection.Connection):
fd = os.dup(conn.fileno())
plugin_main(fd, fd)
reader = StreamPipeReader(conn)
plugin_main(reader=reader, writeFd=fd)
if __name__ == "__main__":
plugin_main(3, 4)
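A hypothetical sketch of spawning the fork entry point above over a multiprocessing pipe; the parent keeps its end of the pipe for its own reader and writes:
import multiprocessing

def start_plugin_fork():
    # The child wraps its end of the pipe in a StreamPipeReader for reads
    # and duplicates the fd for writes, per plugin_fork above.
    parent_conn, child_conn = multiprocessing.Pipe()
    proc = multiprocessing.Process(target=plugin_fork, args=(child_conn,))
    proc.start()
    return parent_conn, proc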


@@ -29,6 +29,8 @@ class RPCResultError(Exception):
def __init__(self, caught, message):
self.caught = caught
self.message = message
self.name = None
self.stack = None
class RpcSerializer:
@@ -121,6 +123,16 @@ class RpcPeer:
self.killed = False
def __apply__(self, proxyId: str, oneWayMethods: List[str], method: str, args: list):
oneway = oneWayMethods and method in oneWayMethods
if self.killed:
future = Future()
if oneway:
future.set_result(None)
return future
future.set_exception(RPCResultError(None, 'RpcPeer has been killed (apply) ' + str(method)))
return future
serializationContext: Dict = {}
serializedArgs = []
for arg in args:
@@ -134,7 +146,7 @@ class RpcPeer:
'method': method,
}
if oneWayMethods and method in oneWayMethods:
if oneway:
rpcApply['oneway'] = True
self.send(rpcApply, None, serializationContext)
future = Future()
@@ -472,12 +484,13 @@ class RpcPeer:
pass
async def createPendingResult(self, cb: Callable[[str, Callable[[Exception], None]], None]):
# if (Object.isFrozen(this.pendingResults))
# return Promise.reject(new RPCResultError('RpcPeer has been killed'));
future = Future()
if self.killed:
future.set_exception(RPCResultError(None, 'RpcPeer has been killed (createPendingResult)'))
return future
id = str(self.idCounter)
self.idCounter = self.idCounter + 1
future = Future()
self.pendingResults[id] = future
await cb(id, lambda e: future.set_exception(RPCResultError(e, None)))
return await future


@@ -385,7 +385,7 @@ export abstract class MediaManagerBase implements MediaManager {
node[candidateId] = inputWeight + outputWeight;
}
catch (e) {
console.warn(converter.name, 'skipping converter due to error', e)
console.warn(candidate.name, 'skipping converter due to error', e)
}
}


@@ -141,7 +141,8 @@ export class PluginHostAPI extends PluginAPIManagedListeners implements PluginAP
for (const upsert of deviceManifest.devices) {
upsert.providerNativeId = deviceManifest.providerNativeId;
await this.pluginHost.upsertDevice(upsert);
const id = await this.pluginHost.upsertDevice(upsert);
this.scrypted.getDevice(id)?.probe().catch(() => { });
}
}