Mirror of https://github.com/koush/scrypted.git (synced 2026-02-07 16:02:13 +00:00)
Compare commits
13 Commits
| Author | SHA1 | Date |
|---|---|---|
| | e2e65f93af | |
| | b271567428 | |
| | a88a295d9a | |
| | 38ba31ca7d | |
| | 1c8ff2493b | |
| | 5c9f62e6b6 | |
| | 6fd8018c52 | |
| | d900ddf5f1 | |
| | e3a8d311ce | |
| | 8bbc3d5470 | |
| | 00cf987cec | |
| | 7e5dcae64a | |
| | cb67237d7c | |
@@ -103,7 +103,11 @@ then
 fi
 
 RUN python$PYTHON_VERSION -m pip install --upgrade pip
-RUN python$PYTHON_VERSION -m pip install aiofiles debugpy typing_extensions typing opencv-python psutil
+if [ "$PYTHON_VERSION" != "3.10" ]
+then
+RUN python$PYTHON_VERSION -m pip install typing
+fi
+RUN python$PYTHON_VERSION -m pip install aiofiles debugpy typing_extensions opencv-python psutil
 
 
 echo "Installing Scrypted Launch Agent..."
plugins/alexa/package-lock.json (generated, 2836 lines changed; diff suppressed because it is too large)
@@ -1,6 +1,6 @@
 {
 "name": "@scrypted/alexa",
-"version": "0.2.1",
+"version": "0.2.3",
 "scripts": {
 "scrypted-setup-project": "scrypted-setup-project",
 "prescrypted-setup-project": "scrypted-package-json",
@@ -39,6 +39,6 @@
 },
 "devDependencies": {
 "@types/node": "^18.4.2",
-"@scrypted/sdk": "^0.2.70"
+"@scrypted/sdk": "../../sdk"
 }
 }
@@ -17,7 +17,8 @@ export class AlexaSignalingSession implements RTCSignalingSession {
 sdp: this.directive.payload.offer.value,
 },
 disableTrickle: true,
-// this could be a low resolutions creen, no way of knowning, so never send a 1080p stream
+disableTurn: true,
+// this could be a low resolution screen, no way of knowning, so never send a 1080p stream
 screen: {
 devicePixelRatio: 1,
 width: 1280,
plugins/coreml/package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
 {
 "name": "@scrypted/coreml",
-"version": "0.0.26",
+"version": "0.0.27",
 "lockfileVersion": 2,
 "requires": true,
 "packages": {
 "": {
 "name": "@scrypted/coreml",
-"version": "0.0.26",
+"version": "0.0.27",
 "devDependencies": {
 "@scrypted/sdk": "file:../../sdk"
 }
@@ -41,5 +41,5 @@
 "devDependencies": {
 "@scrypted/sdk": "file:../../sdk"
 },
-"version": "0.0.26"
+"version": "0.0.27"
 }
@@ -6,6 +6,10 @@ from predict import PredictPlugin, Prediction, Rectangle
 import coremltools as ct
 import os
 from PIL import Image
+import asyncio
+import concurrent.futures
 
+predictExecutor = concurrent.futures.ThreadPoolExecutor(2, "CoreML-Predict")
+
 def parse_label_contents(contents: str):
 lines = contents.splitlines()
@@ -36,6 +40,7 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 labels_contents = scrypted_sdk.zip.open(
 'fs/coco_labels.txt').read().decode('utf8')
 self.labels = parse_label_contents(labels_contents)
+self.loop = asyncio.get_event_loop()
 
 # width, height, channels
 def get_input_details(self) -> Tuple[int, int, int]:
@@ -44,8 +49,12 @@ class CoreMLPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 def get_input_size(self) -> Tuple[float, float]:
 return (self.inputwidth, self.inputheight)
 
-def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
-out_dict = self.model.predict({'image': input, 'confidenceThreshold': .2 })
+async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
+# run in executor if this is the plugin loop
+if asyncio.get_event_loop() is self.loop:
+out_dict = await asyncio.get_event_loop().run_in_executor(predictExecutor, lambda: self.model.predict({'image': input, 'confidenceThreshold': .2 }))
+else:
+out_dict = self.model.predict({'image': input, 'confidenceThreshold': .2 })
 
 coordinatesList = out_dict['coordinates']
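The CoreML hunk above moves blocking inference off the plugin's asyncio loop: `detect_once` becomes a coroutine and dispatches `model.predict` to a small dedicated thread pool whenever it is called on the plugin loop. A minimal standalone sketch of that pattern, assuming a generic `model` object with a blocking `predict` method (the names here are illustrative, not taken from the plugin):

```python
import asyncio
import concurrent.futures

# Dedicated pool so blocking inference never stalls the asyncio event loop.
predict_executor = concurrent.futures.ThreadPoolExecutor(2, "Predict")

class Detector:
    def __init__(self, model):
        self.model = model                    # assumed: object with a blocking .predict(dict) method
        self.loop = asyncio.get_event_loop()  # the loop the plugin was created on

    async def detect_once(self, inputs: dict):
        # Hop to the executor when running on the plugin loop; a caller that is
        # already off-loop can make the blocking call directly.
        if asyncio.get_event_loop() is self.loop:
            return await asyncio.get_event_loop().run_in_executor(
                predict_executor, lambda: self.model.predict(inputs))
        return self.model.predict(inputs)
```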
plugins/objectdetector/package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
 {
 "name": "@scrypted/objectdetector",
-"version": "0.0.103",
+"version": "0.0.106",
 "lockfileVersion": 2,
 "requires": true,
 "packages": {
 "": {
 "name": "@scrypted/objectdetector",
-"version": "0.0.103",
+"version": "0.0.106",
 "license": "Apache-2.0",
 "dependencies": {
 "@scrypted/common": "file:../../common",
@@ -1,6 +1,6 @@
 {
 "name": "@scrypted/objectdetector",
-"version": "0.0.103",
+"version": "0.0.106",
 "description": "Scrypted Video Analysis Plugin. Installed alongside a detection service like OpenCV or TensorFlow.",
 "author": "Scrypted",
 "license": "Apache-2.0",
@@ -1,4 +1,4 @@
-import sdk, { VideoFrameGenerator, Camera, DeviceState, EventListenerRegister, MediaObject, MixinDeviceBase, MixinProvider, MotionSensor, ObjectDetection, ObjectDetectionCallbacks, ObjectDetectionModel, ObjectDetectionResult, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedNativeId, Setting, Settings, SettingValue, VideoCamera } from '@scrypted/sdk';
+import sdk, { ScryptedMimeTypes, Image, VideoFrame, VideoFrameGenerator, Camera, DeviceState, EventListenerRegister, MediaObject, MixinDeviceBase, MixinProvider, MotionSensor, ObjectDetection, ObjectDetectionCallbacks, ObjectDetectionModel, ObjectDetectionResult, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedNativeId, Setting, Settings, SettingValue, VideoCamera } from '@scrypted/sdk';
 import { StorageSettings } from '@scrypted/sdk/storage-settings';
 import crypto from 'crypto';
 import cloneDeep from 'lodash/cloneDeep';
@@ -50,6 +50,20 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
 detections = new Map<string, MediaObject>();
 cameraDevice: ScryptedDevice & Camera & VideoCamera & MotionSensor & ObjectDetector;
 storageSettings = new StorageSettings(this, {
+newPipeline: {
+title: 'Video Pipeline',
+description: 'Configure how frames are provided to the video analysis pipeline.',
+async onGet() {
+return {
+choices: [
+'Default',
+'Snapshot',
+...getAllDevices().filter(d => d.interfaces.includes(ScryptedInterface.VideoFrameGenerator)).map(d => d.name),
+],
+}
+},
+defaultValue: 'Default',
+},
 motionSensorSupplementation: {
 title: 'Built-In Motion Sensor',
 description: `This camera has a built in motion sensor. Using ${this.objectDetection.name} may be unnecessary and will use additional CPU. Replace will ignore the built in motion sensor. Filter will verify the motion sent by built in motion sensor. The Default is ${BUILTIN_MOTION_SENSOR_REPLACE}.`,
@@ -473,22 +487,64 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
 if (this.detectorRunning)
 return;
 
-const stream = await this.cameraDevice.getVideoStream({
-destination: 'local-recorder',
-// ask rebroadcast to mute audio, not needed.
-audio: null,
-});
-
 this.detectorRunning = true;
 this.analyzeStop = Date.now() + this.getDetectionDuration();
 
-const videoFrameGenerator = this.newPipeline as VideoFrameGenerator;
+const newPipeline = this.newPipeline;
+let generator : () => Promise<AsyncGenerator<VideoFrame & MediaObject>>;
+if (newPipeline === 'Snapshot') {
+const self = this;
+generator = async () => (async function* gen() {
+try {
+while (self.detectorRunning) {
+const now = Date.now();
+const sleeper = async () => {
+const diff = now + 1100 - Date.now();
+if (diff > 0)
+await sleep(diff);
+};
+let image: MediaObject & VideoFrame;
+try {
+const mo = await self.cameraDevice.takePicture({
+reason: 'event',
+});
+image = await sdk.mediaManager.convertMediaObject(mo, ScryptedMimeTypes.Image);
+}
+catch (e) {
+self.console.error('Video analysis snapshot failed. Will retry in a moment.');
+await sleeper();
+continue;
+}
+
+// self.console.log('yield')
+yield image;
+// self.console.log('done yield')
+await sleeper();
+}
+}
+finally {
+self.console.log('Snapshot generation finished.');
+}
+})();
+}
+else {
+const videoFrameGenerator = systemManager.getDeviceById<VideoFrameGenerator>(newPipeline);
+if (!videoFrameGenerator)
+throw new Error('invalid VideoFrameGenerator');
+const stream = await this.cameraDevice.getVideoStream({
+destination: 'local-recorder',
+// ask rebroadcast to mute audio, not needed.
+audio: null,
+});
+
+generator = async () => videoFrameGenerator.generateVideoFrames(stream);
+}
 
 try {
 const start = Date.now();
 let detections = 0;
 for await (const detected
-of await this.objectDetection.generateObjectDetections(await videoFrameGenerator.generateVideoFrames(stream), {
+of await this.objectDetection.generateObjectDetections(await generator(), {
 settings: this.getCurrentSettings(),
 })) {
 if (!this.detectorRunning) {
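The pipeline change above introduces a 'Snapshot' mode: an async generator that repeatedly takes a still picture, yields it to the detector, and paces itself to roughly one frame every 1.1 seconds, retrying after failed snapshots. The plugin code is TypeScript; the following is a hedged Python sketch of the same pacing/retry loop, with `take_picture` and `is_running` as assumed stand-ins for the camera call and session state:

```python
import asyncio
import time
from typing import AsyncIterator, Awaitable, Callable

async def snapshot_frames(take_picture: Callable[[], Awaitable[bytes]],
                          is_running: Callable[[], bool],
                          interval: float = 1.1) -> AsyncIterator[bytes]:
    """Yield snapshots no faster than `interval` seconds, retrying on failure."""
    while is_running():
        started = time.monotonic()

        async def pace():
            # Sleep for whatever remains of the interval after this iteration's work.
            remaining = started + interval - time.monotonic()
            if remaining > 0:
                await asyncio.sleep(remaining)

        try:
            image = await take_picture()
        except Exception:
            # Snapshot failed; wait out the interval and try again.
            await pace()
            continue

        yield image
        await pace()
```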
@@ -849,8 +905,18 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
 }
 
 get newPipeline() {
-return this.plugin.storageSettings.values.newPipeline;
-}
+if (!this.plugin.storageSettings.values.newPipeline)
+return;
+
+const newPipeline = this.storageSettings.values.newPipeline;
+if (!newPipeline)
+return newPipeline;
+if (newPipeline === 'Snapshot')
+return newPipeline;
+const pipelines = getAllDevices().filter(d => d.interfaces.includes(ScryptedInterface.VideoFrameGenerator));
+const found = pipelines.find(p => p.name === newPipeline);
+return found?.id || pipelines[0]?.id;
+}
 
 async getMixinSettings(): Promise<Setting[]> {
 const settings: Setting[] = [];
@@ -872,7 +938,8 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
 }
 
 this.storageSettings.settings.motionSensorSupplementation.hide = !this.hasMotionType || !this.mixinDeviceInterfaces.includes(ScryptedInterface.MotionSensor);
-this.storageSettings.settings.captureMode.hide = this.hasMotionType || !!this.newPipeline;
+this.storageSettings.settings.captureMode.hide = this.hasMotionType || !!this.plugin.storageSettings.values.newPipeline;
+this.storageSettings.settings.newPipeline.hide = this.hasMotionType || !this.plugin.storageSettings.values.newPipeline;
 this.storageSettings.settings.detectionDuration.hide = this.hasMotionType;
 this.storageSettings.settings.detectionTimeout.hide = this.hasMotionType;
 this.storageSettings.settings.motionDuration.hide = !this.hasMotionType;
@@ -1141,8 +1208,7 @@ class ObjectDetectionPlugin extends AutoenableMixinProvider implements Settings
 newPipeline: {
 title: 'New Video Pipeline',
 description: 'WARNING! DO NOT ENABLE: Use the new video pipeline. Leave blank to use the legacy pipeline.',
-type: 'device',
-deviceFilter: `interfaces.includes('${ScryptedInterface.VideoFrameGenerator}')`,
+type: 'boolean',
 },
 activeMotionDetections: {
 title: 'Active Motion Detection Sessions',
plugins/python-codecs/package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
 {
 "name": "@scrypted/python-codecs",
-"version": "0.1.3",
+"version": "0.1.5",
 "lockfileVersion": 3,
 "requires": true,
 "packages": {
 "": {
 "name": "@scrypted/python-codecs",
-"version": "0.1.3",
+"version": "0.1.5",
 "devDependencies": {
 "@scrypted/sdk": "file:../../sdk"
 }
@@ -1,6 +1,6 @@
 {
 "name": "@scrypted/python-codecs",
-"version": "0.1.3",
+"version": "0.1.5",
 "description": "Python Codecs for Scrypted",
 "keywords": [
 "scrypted",
@@ -25,7 +25,7 @@
 "runtime": "python",
 "type": "API",
 "interfaces": [
-"VideoFrameGenerator"
+"DeviceProvider"
 ]
 },
 "devDependencies": {
@@ -16,8 +16,12 @@ except:
 
 class Callback:
 def __init__(self, callback) -> None:
-self.loop = asyncio.get_running_loop()
-self.callback = callback
+if callback:
+self.loop = asyncio.get_running_loop()
+self.callback = callback
+else:
+self.loop = None
+self.callback = None
 
 def createPipelineIterator(pipeline: str):
 pipeline = '{pipeline} ! queue leaky=downstream max-size-buffers=0 ! appsink name=appsink emit-signals=true sync=false max-buffers=-1 drop=true'.format(pipeline=pipeline)
@@ -1,3 +1,4 @@
+import time
 from gstreamer import createPipelineIterator
 import asyncio
 from util import optional_chain
@@ -5,10 +6,9 @@ import scrypted_sdk
 from typing import Any
 from urllib.parse import urlparse
 import pyvips
-import threading
-import traceback
+import concurrent.futures
 
 Gst = None
 try:
 import gi
 gi.require_version('Gst', '1.0')
@@ -18,6 +18,14 @@ try:
 except:
 pass
 
+av = None
+try:
+import av
+av.logging.set_level(av.logging.PANIC)
+except:
+pass
 
+# vips is already multithreaded, but needs to be kicked off the python asyncio thread.
+vipsExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="vips")
 
 async def to_thread(f):
@@ -97,20 +105,100 @@ async def createVipsMediaObject(image: VipsImage):
 })
 return ret
 
-class PythonCodecs(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.VideoFrameGenerator):
+class LibavGenerator(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.VideoFrameGenerator):
+async def generateVideoFrames(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
+worker = scrypted_sdk.fork()
+forked: CodecFork = await worker.result
+return await forked.generateVideoFramesLibav(mediaObject, options, filter)
+
+class GstreamerGenerator(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.VideoFrameGenerator):
+async def generateVideoFrames(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
+worker = scrypted_sdk.fork()
+forked: CodecFork = await worker.result
+return await forked.generateVideoFramesGstreamer(mediaObject, options, filter)
+
+class PythonCodecs(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.DeviceProvider):
+def __init__(self, nativeId = None):
+super().__init__(nativeId)
 
-async def generateVideoFrames(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
-worker = scrypted_sdk.fork()
-forked = await worker.result
-return await forked.generateVideoFrames(mediaObject, options, filter)
+asyncio.ensure_future(self.initialize())
+
+async def initialize(self):
+manifest: scrypted_sdk.DeviceManifest = {
+'devices': [],
+}
+if Gst:
+gstDevice: scrypted_sdk.Device = {
+'name': 'Gstreamer',
+'nativeId': 'gstreamer',
+'interfaces': [
+scrypted_sdk.ScryptedInterface.VideoFrameGenerator.value,
+],
+'type': scrypted_sdk.ScryptedDeviceType.API.value,
+}
+manifest['devices'].append(gstDevice)
+
+if av:
+avDevice: scrypted_sdk.Device = {
+'name': 'Libav',
+'nativeId': 'libav',
+'interfaces': [
+scrypted_sdk.ScryptedInterface.VideoFrameGenerator.value,
+],
+'type': scrypted_sdk.ScryptedDeviceType.API.value,
+}
+manifest['devices'].append(avDevice)
+
+await scrypted_sdk.deviceManager.onDevicesChanged(manifest)
+
+def getDevice(self, nativeId: str) -> Any:
+if nativeId == 'gstreamer':
+return GstreamerGenerator('gstreamer')
+if nativeId == 'libav':
+return LibavGenerator('libav')
 
 def create_scrypted_plugin():
 return PythonCodecs()
 
+async def generateVideoFramesLibav(mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
+ffmpegInput: scrypted_sdk.FFmpegInput = await scrypted_sdk.mediaManager.convertMediaObjectToJSON(mediaObject, scrypted_sdk.ScryptedMimeTypes.FFmpegInput.value)
+videosrc = ffmpegInput.get('url')
+container = av.open(videosrc, options = options)
+# none of this stuff seems to work. might be libav being slow with rtsp.
+# container.no_buffer = True
+# container.options['-analyzeduration'] = '0'
+# container.options['-probesize'] = '500000'
+stream = container.streams.video[0]
+# stream.codec_context.thread_count = 1
+# stream.codec_context.low_delay = True
+# stream.codec_context.options['-analyzeduration'] = '0'
+# stream.codec_context.options['-probesize'] = '500000'
+
-async def generateVideoFrames(mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
+start = 0
+try:
+for idx, frame in enumerate(container.decode(stream)):
+now = time.time()
+if not start:
+start = now
+elapsed = now - start
+if (frame.time or 0) < elapsed - 0.500:
+# print('too slow, skipping frame')
+continue
+# print(frame)
+vips = pyvips.Image.new_from_array(frame.to_ndarray(format='rgb24'))
+vipsImage = VipsImage(vips)
+try:
+mo = await createVipsMediaObject(VipsImage(vips))
+yield mo
+finally:
+vipsImage.vipsImage.invalidate()
+vipsImage.vipsImage = None
+
+finally:
+container.close()
+
+
+async def generateVideoFramesGstreamer(mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
 ffmpegInput: scrypted_sdk.FFmpegInput = await scrypted_sdk.mediaManager.convertMediaObjectToJSON(mediaObject, scrypted_sdk.ScryptedMimeTypes.FFmpegInput.value)
 container = ffmpegInput.get('container', None)
 videosrc = ffmpegInput.get('url')
@@ -157,9 +245,18 @@ async def generateVideoFrames(mediaObject: scrypted_sdk.MediaObject, options: sc
 gst_buffer.unmap(info)
 
 class CodecFork:
-async def generateVideoFrames(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
+async def generateVideoFramesGstreamer(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
 try:
-async for data in generateVideoFrames(mediaObject, options, filter):
+async for data in generateVideoFramesGstreamer(mediaObject, options, filter):
 yield data
 finally:
 import os
+os._exit(os.EX_OK)
+pass
+
+async def generateVideoFramesLibav(self, mediaObject: scrypted_sdk.MediaObject, options: scrypted_sdk.VideoFrameGeneratorOptions = None, filter: Any = None) -> scrypted_sdk.VideoFrame:
+try:
+async for data in generateVideoFramesLibav(mediaObject, options, filter):
+yield data
+finally:
+import os
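The new `generateVideoFramesLibav` above decodes with PyAV and drops any frame whose presentation time has fallen more than half a second behind wall-clock time, so a slow consumer cannot accumulate latency. A minimal sketch of that frame-dropping decode loop (the URL is a placeholder; only the PyAV calls shown in the diff are used):

```python
import time
import av  # PyAV

def decode_realtime(url: str, max_lag: float = 0.5):
    """Decode video frames, skipping frames that fall too far behind real time."""
    container = av.open(url)
    stream = container.streams.video[0]
    start = None
    try:
        for frame in container.decode(stream):
            now = time.time()
            if start is None:
                start = now
            elapsed = now - start
            # frame.time is the presentation timestamp in seconds (may be None).
            if (frame.time or 0) < elapsed - max_lag:
                continue  # decoder is lagging, drop this frame
            yield frame.to_ndarray(format='rgb24')
    finally:
        container.close()
```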
plugins/tensorflow-lite/package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
 {
 "name": "@scrypted/tensorflow-lite",
-"version": "0.0.112",
+"version": "0.0.113",
 "lockfileVersion": 2,
 "requires": true,
 "packages": {
 "": {
 "name": "@scrypted/tensorflow-lite",
-"version": "0.0.112",
+"version": "0.0.113",
 "devDependencies": {
 "@scrypted/sdk": "file:../../sdk"
 }
@@ -44,5 +44,5 @@
 "devDependencies": {
 "@scrypted/sdk": "file:../../sdk"
 },
-"version": "0.0.112"
+"version": "0.0.113"
 }
@@ -209,11 +209,11 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
 async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame) -> ObjectsDetected:
 pass
 
-def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
+async def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
 pil: Image.Image = avframe.to_image()
-return self.run_detection_image(detection_session, pil, settings, src_size, convert_to_src_size)
+return await self.run_detection_image(detection_session, pil, settings, src_size, convert_to_src_size)
 
-def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
+async def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
 pass
 
 def run_detection_crop(self, detection_session: DetectionSession, sample: Any, settings: Any, src_size, convert_to_src_size, bounding_box: Tuple[float, float, float, float]) -> ObjectsDetected:
@@ -335,7 +335,7 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
 finally:
 detection_session.running = False
 else:
-return self.run_detection_jpeg(detection_session, bytes(await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, 'image/jpeg')), settings)
+return await self.run_detection_jpeg(detection_session, bytes(await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, 'image/jpeg')), settings)
 
 if not create:
 # a detection session may have been created, but not started
@@ -479,7 +479,7 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
 if not current_data:
 raise Exception('no sample')
 
-detection_result = self.run_detection_crop(
+detection_result = await self.run_detection_crop(
 detection_session, current_data, detection_session.settings, current_src_size, current_convert_to_src_size, boundingBox)
 
 return detection_result['detections']
@@ -493,7 +493,7 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
 first_frame = False
 print("first frame received", detection_session.id)
 
-detection_result, data = run_detection(
+detection_result, data = await run_detection(
 detection_session, sample, detection_session.settings, src_size, convert_to_src_size)
 if detection_result:
 detection_result['running'] = True
@@ -250,13 +250,13 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 # print(detection_result)
 return detection_result
 
-def run_detection_jpeg(self, detection_session: PredictSession, image_bytes: bytes, settings: Any) -> ObjectsDetected:
+async def run_detection_jpeg(self, detection_session: PredictSession, image_bytes: bytes, settings: Any) -> ObjectsDetected:
 stream = io.BytesIO(image_bytes)
 image = Image.open(stream)
 if image.mode == 'RGBA':
 image = image.convert('RGB')
 
-detections, _ = self.run_detection_image(detection_session, image, settings, image.size)
+detections, _ = await self.run_detection_image(detection_session, image, settings, image.size)
 return detections
 
 def get_detection_input_size(self, src_size):
@@ -269,7 +269,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 def get_input_size(self) -> Tuple[int, int]:
 pass
 
-def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
+async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
 pass
 
 async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, settings: Any) -> ObjectsDetected:
@@ -288,7 +288,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 })
 image = Image.frombuffer('RGB', (w, h), data)
 try:
-ret = self.detect_once(image, settings, src_size, cvss)
+ret = await self.detect_once(image, settings, src_size, cvss)
 return ret
 finally:
 image.close()
@@ -339,9 +339,9 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 def cvss2(point, normalize=False):
 return point[0] / s + ow, point[1] / s + oh, True
 
-ret1 = self.detect_once(first, settings, src_size, cvss1)
+ret1 = await self.detect_once(first, settings, src_size, cvss1)
 first.close()
-ret2 = self.detect_once(second, settings, src_size, cvss2)
+ret2 = await self.detect_once(second, settings, src_size, cvss2)
 second.close()
 
 two_intersect = intersect_rect(Rectangle(*first_crop), Rectangle(*second_crop))
@@ -374,7 +374,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 ret['detections'] = dedupe_detections(ret1['detections'] + ret2['detections'], is_same_detection=is_same_detection_middle)
 return ret
 
-def run_detection_image(self, detection_session: PredictSession, image: Image.Image, settings: Any, src_size, convert_to_src_size: Any = None, multipass_crop: Tuple[float, float, float, float] = None):
+async def run_detection_image(self, detection_session: PredictSession, image: Image.Image, settings: Any, src_size, convert_to_src_size: Any = None, multipass_crop: Tuple[float, float, float, float] = None):
 (w, h) = self.get_input_size() or image.size
 (iw, ih) = image.size
 
@@ -448,7 +448,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 converted = convert_to_src_size(unscaled, normalize) if convert_to_src_size else (unscaled[0], unscaled[1], True)
 return converted
 
-ret = self.detect_once(input, settings, src_size, cvss)
+ret = await self.detect_once(input, settings, src_size, cvss)
 input.close()
 detection_session.processed = detection_session.processed + 1
 return ret, RawImage(image)
@@ -461,7 +461,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 converted = convert_to_src_size(point, normalize) if convert_to_src_size else (point[0], point[1], True)
 return converted
 
-ret = self.detect_once(image, settings, src_size, cvss)
+ret = await self.detect_once(image, settings, src_size, cvss)
 if detection_session:
 detection_session.processed = detection_session.processed + 1
 else:
@@ -483,11 +483,11 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 converted = convert_to_src_size(unscaled, normalize) if convert_to_src_size else (unscaled[0], unscaled[1], True)
 return converted
 
-ret1 = self.detect_once(first, settings, src_size, cvss1)
+ret1 = await self.detect_once(first, settings, src_size, cvss1)
 first.close()
 if detection_session:
 detection_session.processed = detection_session.processed + 1
-ret2 = self.detect_once(second, settings, src_size, cvss2)
+ret2 = await self.detect_once(second, settings, src_size, cvss2)
 if detection_session:
 detection_session.processed = detection_session.processed + 1
 second.close()
@@ -576,11 +576,11 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 # print('untracked %s: %s' % (d['className'], d['score']))
 
 
-def run_detection_crop(self, detection_session: DetectionSession, sample: RawImage, settings: Any, src_size, convert_to_src_size, bounding_box: Tuple[float, float, float, float]) -> ObjectsDetected:
-(ret, _) = self.run_detection_image(detection_session, sample.image, settings, src_size, convert_to_src_size, bounding_box)
+async def run_detection_crop(self, detection_session: DetectionSession, sample: RawImage, settings: Any, src_size, convert_to_src_size, bounding_box: Tuple[float, float, float, float]) -> ObjectsDetected:
+(ret, _) = await self.run_detection_image(detection_session, sample.image, settings, src_size, convert_to_src_size, bounding_box)
 return ret
 
-def run_detection_gstsample(self, detection_session: PredictSession, gstsample, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Image.Image]:
+async def run_detection_gstsample(self, detection_session: PredictSession, gstsample, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Image.Image]:
 caps = gstsample.get_caps()
 # can't trust the width value, compute the stride
 height = caps.get_structure(0).get_value('height')
@@ -604,7 +604,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
 gst_buffer.unmap(info)
 
 try:
-return self.run_detection_image(detection_session, image, settings, src_size, convert_to_src_size)
+return await self.run_detection_image(detection_session, image, settings, src_size, convert_to_src_size)
 except:
 image.close()
 traceback.print_exc()
@@ -3,7 +3,6 @@ import threading
 from .common import *
 from PIL import Image
 from pycoral.adapters import detect
-from pycoral.adapters.common import input_size
 loaded_py_coral = False
 try:
 from pycoral.utils.edgetpu import list_edge_tpus
@@ -19,6 +18,9 @@ import scrypted_sdk
 from scrypted_sdk.types import Setting
 from typing import Any, Tuple
 from predict import PredictPlugin
+import concurrent.futures
+import queue
+import asyncio
 
 def parse_label_contents(contents: str):
 lines = contents.splitlines()
@@ -41,6 +43,9 @@ class TensorFlowLitePlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted
 labels_contents = scrypted_sdk.zip.open(
 'fs/coco_labels.txt').read().decode('utf8')
 self.labels = parse_label_contents(labels_contents)
+self.interpreters = queue.Queue()
+self.interpreter_count = 0
+
 try:
 edge_tpus = list_edge_tpus()
 print('edge tpus', edge_tpus)
@@ -53,7 +58,21 @@ class TensorFlowLitePlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted
 'fs/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite').read()
 # face_model = scrypted_sdk.zip.open(
 # 'fs/mobilenet_ssd_v2_face_quant_postprocess.tflite').read()
-self.interpreter = make_interpreter(model)
+for idx, edge_tpu in enumerate(edge_tpus):
+try:
+interpreter = make_interpreter(model, ":%s" % idx)
+interpreter.allocate_tensors()
+_, height, width, channels = interpreter.get_input_details()[
+0]['shape']
+self.input_details = int(width), int(height), int(channels)
+self.interpreters.put(interpreter)
+self.interpreter_count = self.interpreter_count + 1
+print('added tpu %s' % (edge_tpu))
+except Exception as e:
+print('unable to use Coral Edge TPU', e)
+
+if not self.interpreter_count:
+raise Exception('all tpus failed to load')
 # self.face_interpreter = make_interpreter(face_model)
 except Exception as e:
 print('unable to use Coral Edge TPU', e)
@@ -62,10 +81,16 @@ class TensorFlowLitePlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted
 'fs/mobilenet_ssd_v2_coco_quant_postprocess.tflite').read()
 # face_model = scrypted_sdk.zip.open(
 # 'fs/mobilenet_ssd_v2_face_quant_postprocess.tflite').read()
-self.interpreter = tflite.Interpreter(model_content=model)
+interpreter = tflite.Interpreter(model_content=model)
+interpreter.allocate_tensors()
+_, height, width, channels = interpreter.get_input_details()[
+0]['shape']
+self.input_details = int(width), int(height), int(channels)
+self.interpreters.put(interpreter)
+self.interpreter_count = self.interpreter_count + 1
 # self.face_interpreter = make_interpreter(face_model)
-self.interpreter.allocate_tensors()
-self.mutex = threading.Lock()
+
+self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.interpreter_count, thread_name_prefix="tflite", )
 
 async def getSettings(self) -> list[Setting]:
 ret = await super().getSettings()
@@ -83,30 +108,32 @@ class TensorFlowLitePlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted
 
 # width, height, channels
 def get_input_details(self) -> Tuple[int, int, int]:
-with self.mutex:
-_, height, width, channels = self.interpreter.get_input_details()[
-0]['shape']
-return int(width), int(height), int(channels)
+return self.input_details
 
 def get_input_size(self) -> Tuple[int, int]:
-w, h = input_size(self.interpreter)
-return int(w), int(h)
+return self.input_details[0:2]
 
-def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
-try:
-with self.mutex:
+async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
+def predict():
+interpreter = self.interpreters.get()
+try:
 common.set_input(
-self.interpreter, input)
+interpreter, input)
 scale = (1, 1)
 # _, scale = common.set_resized_input(
 # self.interpreter, cropped.size, lambda size: cropped.resize(size, Image.ANTIALIAS))
-self.interpreter.invoke()
+interpreter.invoke()
 objs = detect.get_objects(
-self.interpreter, score_threshold=.2, image_scale=scale)
-except:
-print('tensorflow-lite encountered an error while detecting. requesting plugin restart.')
-self.requestRestart()
-raise e
+interpreter, score_threshold=.2, image_scale=scale)
+return objs
+except:
+print('tensorflow-lite encountered an error while detecting. requesting plugin restart.')
+self.requestRestart()
+raise e
+finally:
+self.interpreters.put(interpreter)
 
+objs = await asyncio.get_event_loop().run_in_executor(self.executor, predict)
 
 allowList = settings.get('allowList', None) if settings else None
 ret = self.create_detection_result(objs, src_size, allowList, cvss)
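The tensorflow-lite change above replaces the single mutex-guarded interpreter with a queue of interpreters (one per detected Edge TPU) and a thread pool sized to match, so several detections can run concurrently without blocking the event loop. A hedged sketch of that pool pattern, with the blocking `run` callable standing in for the actual set_input/invoke/get_objects sequence:

```python
import asyncio
import concurrent.futures
import queue

class InterpreterPool:
    """Share a fixed set of interpreter objects across a thread pool."""

    def __init__(self, interpreters):
        self.interpreters = queue.Queue()
        for interpreter in interpreters:
            self.interpreters.put(interpreter)
        self.executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=self.interpreters.qsize(), thread_name_prefix="tflite")

    async def detect_once(self, run, *args):
        # `run` is an assumed blocking callable taking (interpreter, *args).
        def predict():
            interpreter = self.interpreters.get()   # borrow an interpreter
            try:
                return run(interpreter, *args)
            finally:
                self.interpreters.put(interpreter)  # always return it to the pool
        return await asyncio.get_event_loop().run_in_executor(self.executor, predict)
```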
plugins/webrtc/package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
 {
 "name": "@scrypted/webrtc",
-"version": "0.1.36",
+"version": "0.1.37",
 "lockfileVersion": 2,
 "requires": true,
 "packages": {
 "": {
 "name": "@scrypted/webrtc",
-"version": "0.1.36",
+"version": "0.1.37",
 "dependencies": {
 "@scrypted/common": "file:../../common",
 "@scrypted/sdk": "file:../../sdk",
@@ -1,6 +1,6 @@
 {
 "name": "@scrypted/webrtc",
-"version": "0.1.36",
+"version": "0.1.37",
 "scripts": {
 "scrypted-setup-project": "scrypted-setup-project",
 "prescrypted-setup-project": "scrypted-package-json",
@@ -115,7 +115,8 @@ class WebRTCMixin extends SettingsMixinDeviceBase<RTCSignalingClient & VideoCame
 const device = systemManager.getDeviceById<VideoCamera & Intercom>(this.id);
 const hasIntercom = this.mixinDeviceInterfaces.includes(ScryptedInterface.Intercom);
 
-const mo = await sdk.mediaManager.createMediaObject(device, ScryptedMimeTypes.ScryptedDevice, {
+const requestMediaStream: RequestMediaStream = async options => device.getVideoStream(options);
+const mo = await mediaManager.createMediaObject(requestMediaStream, ScryptedMimeTypes.RequestMediaStream, {
 sourceId: device.id,
 });
 
@@ -126,7 +127,7 @@ class WebRTCMixin extends SettingsMixinDeviceBase<RTCSignalingClient & VideoCame
 mo,
 this.plugin.storageSettings.values.maximumCompatibilityMode,
 this.plugin.getRTCConfiguration(),
-await this.plugin.getWeriftConfiguration(),
+await this.plugin.getWeriftConfiguration(options?.disableTurn),
 );
 }
 
@@ -409,7 +410,7 @@ export class WebRTCPlugin extends AutoenableMixinProvider implements DeviceCreat
 };
 }
 
-async getWeriftConfiguration(): Promise<Partial<PeerConfig>> {
+async getWeriftConfiguration(disableTurn?: boolean): Promise<Partial<PeerConfig>> {
 let ret: Partial<PeerConfig>;
 if (this.storageSettings.values.weriftConfiguration) {
 try {
@@ -420,7 +421,7 @@ export class WebRTCPlugin extends AutoenableMixinProvider implements DeviceCreat
 }
 }
 
-const iceServers = this.storageSettings.values.useTurnServer
+const iceServers = this.storageSettings.values.useTurnServer && !disableTurn
 ? [weriftStunServer, weriftTurnServer]
 : [weriftStunServer];
 
@@ -1942,7 +1942,14 @@ export interface RTCSignalingOptions {
 */
 offer?: RTCSessionDescriptionInit;
 requiresOffer?: boolean;
+/**
+* Disables trickle ICE. All candidates must be sent in the initial offer/answer sdp.
+*/
+disableTrickle?: boolean;
+/**
+* Disables usage of TURN servers, if this client exposes public addresses or provides its own.
+*/
+disableTurn?: boolean;
 /**
 * Hint to proxy the feed, as the target client may be inflexible.
 */
server/.vscode/launch.json (vendored, 2 lines changed)
@@ -28,7 +28,7 @@
 "${workspaceFolder}/**/*.js"
 ],
 "env": {
-"SCRYPTED_PYTHON_PATH": "python3.9",
+"SCRYPTED_PYTHON_PATH": "python3.10",
 // "SCRYPTED_SHARED_WORKER": "true",
 // "SCRYPTED_DISABLE_AUTHENTICATION": "true",
 // "DEBUG": "*",
server/package-lock.json (generated, 4 lines changed)
@@ -1,12 +1,12 @@
 {
 "name": "@scrypted/server",
-"version": "0.7.8",
+"version": "0.7.9",
 "lockfileVersion": 2,
 "requires": true,
 "packages": {
 "": {
 "name": "@scrypted/server",
-"version": "0.7.8",
+"version": "0.7.9",
 "license": "ISC",
 "dependencies": {
 "@mapbox/node-pre-gyp": "^1.0.10",
@@ -1,6 +1,6 @@
 {
 "name": "@scrypted/server",
-"version": "0.7.9",
+"version": "0.7.10",
 "description": "",
 "dependencies": {
 "@mapbox/node-pre-gyp": "^1.0.10",
@@ -141,7 +141,8 @@ export class PluginHostAPI extends PluginAPIManagedListeners implements PluginAP
 
 for (const upsert of deviceManifest.devices) {
 upsert.providerNativeId = deviceManifest.providerNativeId;
-await this.pluginHost.upsertDevice(upsert);
+const id = await this.pluginHost.upsertDevice(upsert);
+this.scrypted.getDevice(id)?.probe().catch(() => { });
 }
 }
 