Compare commits


5 Commits

Author          SHA1        Message                                                    Date
Koushik Dutta   212883e84b  server: probe one off discovered devices after creation   2023-04-28 07:34:21 -07:00
Koushik Dutta   1200537d62  cloud: support default login                               2023-04-27 23:42:54 -07:00
Koushik Dutta   5f6adc9449  predict: publish                                           2023-04-27 21:53:50 -07:00
Koushik Dutta   7d17236ca7  server: fix prepublishOnly script                          2023-04-27 10:31:13 -07:00
Koushik Dutta   028401362a  postrelease                                                2023-04-27 10:30:59 -07:00
21 changed files with 32 additions and 450 deletions

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/cloud",
"version": "0.1.13",
"version": "0.1.14",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/cloud",
"version": "0.1.13",
"version": "0.1.14",
"dependencies": {
"@eneris/push-receiver": "^3.1.4",
"@scrypted/common": "file:../../common",
@@ -44,7 +44,7 @@
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.2.82",
"version": "0.2.97",
"license": "ISC",
"dependencies": {
"@babel/preset-typescript": "^7.18.6",

View File

@@ -55,5 +55,5 @@
"@types/nat-upnp": "^1.1.2",
"@types/node": "^18.11.18"
},
"version": "0.1.13"
"version": "0.1.14"
}

View File

@@ -7,9 +7,8 @@ import { once } from 'events';
import http from 'http';
import HttpProxy from 'http-proxy';
import https from 'https';
-import throttle from "lodash/throttle";
import upnp from 'nat-upnp';
-import net, { AddressInfo } from 'net';
+import net from 'net';
import os from 'os';
import path from 'path';
import qs from 'query-string';
@@ -210,6 +209,11 @@ class ScryptedCloud extends ScryptedDeviceBase implements OauthClient, Settings,
})
this.updateCors();
+if (!this.storageSettings.values.token_info && process.env.SCRYPTED_CLOUD_TOKEN) {
+    this.storageSettings.values.token_info = process.env.SCRYPTED_CLOUD_TOKEN;
+    this.manager.registrationId.then(r => this.sendRegistrationId(r));
+}
}
scheduleRefreshPortForward() {

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/coreml",
"version": "0.1.13",
"version": "0.1.14",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/coreml",
"version": "0.1.13",
"version": "0.1.14",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -40,5 +40,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.13"
"version": "0.1.14"
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/openvino",
"version": "0.1.15",
"version": "0.1.16",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/openvino",
"version": "0.1.15",
"version": "0.1.16",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -39,5 +39,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.15"
"version": "0.1.16"
}

plugins/openvino/src/detect Symbolic link
View File

@@ -0,0 +1 @@
+../../tensorflow-lite/src/detect

View File

@@ -1,77 +0,0 @@
from __future__ import annotations
import asyncio
from typing import Any, Tuple
import scrypted_sdk
from scrypted_sdk.types import (MediaObject, ObjectDetection,
ObjectDetectionGeneratorSession,
ObjectDetectionModel, ObjectDetectionSession,
ObjectsDetected, ScryptedMimeTypes, Setting)
class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
self.loop = asyncio.get_event_loop()
def getClasses(self) -> list[str]:
pass
def getTriggerClasses(self) -> list[str]:
pass
def get_input_details(self) -> Tuple[int, int, int]:
pass
def get_input_format(self) -> str:
pass
def getModelSettings(self, settings: Any = None) -> list[Setting]:
return []
async def getDetectionModel(self, settings: Any = None) -> ObjectDetectionModel:
d: ObjectDetectionModel = {
'name': self.pluginId,
'classes': self.getClasses(),
'triggerClasses': self.getTriggerClasses(),
'inputSize': self.get_input_details(),
'inputFormat': self.get_input_format(),
'settings': [],
}
d['settings'] += self.getModelSettings(settings)
return d
def get_detection_input_size(self, src_size):
pass
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: ObjectDetectionSession) -> ObjectsDetected:
pass
async def generateObjectDetections(self, videoFrames: Any, session: ObjectDetectionGeneratorSession = None) -> Any:
try:
videoFrames = await scrypted_sdk.sdk.connectRPCObject(videoFrames)
async for videoFrame in videoFrames:
videoFrame = await scrypted_sdk.sdk.connectRPCObject(videoFrame)
detected = await self.run_detection_videoframe(videoFrame, session)
yield {
'__json_copy_serialize_children': True,
'detected': detected,
'videoFrame': videoFrame,
}
finally:
try:
await videoFrames.aclose()
except:
pass
async def detectObjects(self, mediaObject: MediaObject, session: ObjectDetectionSession = None) -> ObjectsDetected:
vf: scrypted_sdk.VideoFrame
if mediaObject and mediaObject.mimeType == ScryptedMimeTypes.Image.value:
vf = await scrypted_sdk.sdk.connectRPCObject(mediaObject)
else:
vf = await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, ScryptedMimeTypes.Image.value)
return await self.run_detection_videoframe(vf, session)
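
For illustration: the deleted base class (now shared from tensorflow-lite via the symlink above) streams results as an async generator and closes the upstream frame iterator in a finally block, so a consumer that stops early cannot leak the stream. A minimal self-contained sketch of that pattern, with a hypothetical frames() stand-in for the SDK's videoFrames object:

import asyncio

async def frames():
    # hypothetical stand-in for the videoFrames RPC iterator
    for i in range(100):
        yield f"frame-{i}"

async def generate_detections(video_frames):
    try:
        async for frame in video_frames:
            yield {'videoFrame': frame, 'detected': {'detections': []}}
    finally:
        # as in generateObjectDetections above: always close the upstream
        # iterator, even when the consumer abandons the stream early
        try:
            await video_frames.aclose()
        except Exception:
            pass

async def main():
    gen = generate_detections(frames())
    async for result in gen:
        print(result['videoFrame'])  # frame-0
        break
    await gen.aclose()  # cascades into the finally block above

asyncio.run(main())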

View File

@@ -1,24 +0,0 @@
import threading
import asyncio
async def run_coro_threadsafe(coro, other_loop, our_loop = None):
"""Schedules coro in other_loop, awaits until coro has run and returns
its result.
"""
loop = our_loop or asyncio.get_event_loop()
# schedule coro safely in other_loop, get a concurrent.future back
# NOTE run_coroutine_threadsafe requires Python 3.5.1
fut = asyncio.run_coroutine_threadsafe(coro, other_loop)
# set up a threading.Event that fires when the future is finished
finished = threading.Event()
def fut_finished_cb(_):
finished.set()
fut.add_done_callback(fut_finished_cb)
# wait on that event in an executor, yielding control to our_loop
await loop.run_in_executor(None, finished.wait)
# coro's result is now available in the future object
return fut.result()
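
For context, this deleted helper implements the standard cross-loop handoff: run_coroutine_threadsafe schedules the coroutine on the other thread's loop, and the caller waits for the result without blocking its own loop. A minimal self-contained sketch of the same technique, using asyncio.wrap_future in place of the helper's threading.Event (names are illustrative):

import asyncio
import threading

# a loop owned by a dedicated worker thread (never closed in this sketch)
worker_loop = asyncio.new_event_loop()
threading.Thread(target=worker_loop.run_forever, daemon=True).start()

async def work():
    await asyncio.sleep(0.1)
    return 42

async def main():
    # schedule work() onto the worker thread's loop...
    fut = asyncio.run_coroutine_threadsafe(work(), worker_loop)
    # ...and await its concurrent.futures.Future from this loop
    print(await asyncio.wrap_future(fut))  # 42

asyncio.run(main())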

View File

@@ -0,0 +1 @@
+../../tensorflow-lite/src/predict

View File

@@ -1,298 +0,0 @@
from __future__ import annotations
import asyncio
import concurrent.futures
import os
import re
import urllib.request
from typing import Any, List, Tuple
import scrypted_sdk
from PIL import Image
from scrypted_sdk.types import (ObjectDetectionResult, ObjectDetectionSession,
ObjectsDetected, Setting)
from detect import DetectPlugin
from .rectangle import (Rectangle, combine_rect, from_bounding_box,
intersect_area, intersect_rect, to_bounding_box)
# vips is already multithreaded, but needs to be kicked off the python asyncio thread.
toThreadExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="image")
async def to_thread(f):
loop = asyncio.get_running_loop()
return await loop.run_in_executor(toThreadExecutor, f)
async def ensureRGBData(data: bytes, size: Tuple[int, int], format: str):
if format != 'rgba':
return Image.frombuffer('RGB', size, data)
def convert():
rgba = Image.frombuffer('RGBA', size, data)
try:
return rgba.convert('RGB')
finally:
rgba.close()
return await to_thread(convert)
def parse_label_contents(contents: str):
lines = contents.splitlines()
ret = {}
for row_number, content in enumerate(lines):
pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
if len(pair) == 2 and pair[0].strip().isdigit():
ret[int(pair[0])] = pair[1].strip()
else:
ret[row_number] = content.strip()
return ret
def is_same_box(bb1, bb2, threshold = .7):
r1 = from_bounding_box(bb1)
r2 = from_bounding_box(bb2)
ia = intersect_area(r1, r2)
if not ia:
return False, None
a1 = bb1[2] * bb1[3]
a2 = bb2[2] * bb2[3]
# if area intersect area is too small, these are different boxes
if ia / a1 < threshold or ia / a2 < threshold:
return False, None
l = min(bb1[0], bb2[0])
t = min(bb1[1], bb2[1])
r = max(bb1[0] + bb1[2], bb2[0] + bb2[2])
b = max(bb1[1] + bb1[3], bb2[1] + bb2[3])
w = r - l
h = b - t
return True, (l, t, w, h)
def is_same_detection(d1: ObjectDetectionResult, d2: ObjectDetectionResult):
if d1['className'] != d2['className']:
return False, None
return is_same_box(d1['boundingBox'], d2['boundingBox'])
def dedupe_detections(input: List[ObjectDetectionResult], is_same_detection = is_same_detection):
input = input.copy()
detections = []
while len(input):
d = input.pop()
found = False
for c in detections:
same, box = is_same_detection(d, c)
if same:
# encompass this box and score
d['boundingBox'] = box
d['score'] = max(d['score'], c['score'])
# remove from current detections list
detections = list(filter(lambda r: r != c, detections))
# run dedupe again with this new larger item
input.append(d)
found = True
break
if not found:
detections.append(d)
return detections
class Prediction:
def __init__(self, id: int, score: float, bbox: Tuple[float, float, float, float]):
self.id = id
self.score = score
self.bbox = bbox
class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings):
labels: dict
def downloadFile(self, url: str, filename: str):
filesPath = os.path.join(os.environ['SCRYPTED_PLUGIN_VOLUME'], 'files')
fullpath = os.path.join(filesPath, filename)
if os.path.isfile(fullpath):
return fullpath
os.makedirs(filesPath, exist_ok=True)
tmp = fullpath + '.tmp'
urllib.request.urlretrieve(url, tmp)
os.rename(tmp, fullpath)
return fullpath
def getClasses(self) -> list[str]:
return list(self.labels.values())
def getTriggerClasses(self) -> list[str]:
return ['motion']
def requestRestart(self):
asyncio.ensure_future(scrypted_sdk.deviceManager.requestRestart())
# width, height, channels
def get_input_details(self) -> Tuple[int, int, int]:
pass
def getModelSettings(self, settings: Any = None) -> list[Setting]:
return []
def create_detection_result(self, objs: List[Prediction], size, convert_to_src_size=None) -> ObjectsDetected:
detections: List[ObjectDetectionResult] = []
detection_result: ObjectsDetected = {}
detection_result['detections'] = detections
detection_result['inputDimensions'] = size
for obj in objs:
className = self.labels.get(obj.id, obj.id)
detection: ObjectDetectionResult = {}
detection['boundingBox'] = (
obj.bbox.xmin, obj.bbox.ymin, obj.bbox.xmax - obj.bbox.xmin, obj.bbox.ymax - obj.bbox.ymin)
detection['className'] = className
detection['score'] = obj.score
detections.append(detection)
if convert_to_src_size:
detections = detection_result['detections']
detection_result['detections'] = []
for detection in detections:
bb = detection['boundingBox']
x, y = convert_to_src_size((bb[0], bb[1]))
x2, y2 = convert_to_src_size(
(bb[0] + bb[2], bb[1] + bb[3]))
detection['boundingBox'] = (x, y, x2 - x + 1, y2 - y + 1)
detection_result['detections'].append(detection)
# print(detection_result)
return detection_result
def get_detection_input_size(self, src_size):
# signals to pipeline that any input size is fine
# previous code used to resize to correct size and run detection that way.
# new code will resize the frame and potentially do multiple passes.
# this is useful for high quality thumbnails.
return (None, None)
def get_input_size(self) -> Tuple[int, int]:
pass
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
pass
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: ObjectDetectionSession) -> ObjectsDetected:
settings = detection_session and detection_session.get('settings')
src_size = videoFrame.width, videoFrame.height
w, h = self.get_input_size()
input_aspect_ratio = w / h
iw, ih = src_size
src_aspect_ratio = iw / ih
ws = w / iw
hs = h / ih
s = max(ws, hs)
# image is already correct aspect ratio, so it can be processed in a single pass.
if input_aspect_ratio == src_aspect_ratio:
def cvss(point):
return point[0] / s, point[1] / s
# aspect ratio matches, but image must be scaled.
resize = None
if ih != w:
resize = {
'width': w,
'height': h,
}
data = await videoFrame.toBuffer({
'resize': resize,
'format': videoFrame.format or 'rgb',
})
image = await ensureRGBData(data, (w, h), videoFrame.format)
try:
ret = await self.detect_once(image, settings, src_size, cvss)
return ret
finally:
image.close()
sw = int(w / s)
sh = int(h / s)
first_crop = (0, 0, sw, sh)
ow = iw - sw
oh = ih - sh
second_crop = (ow, oh, ow + sw, oh + sh)
firstData, secondData = await asyncio.gather(
videoFrame.toBuffer({
'resize': {
'width': w,
'height': h,
},
'crop': {
'left': 0,
'top': 0,
'width': sw,
'height': sh,
},
'format': videoFrame.format or 'rgb',
}),
videoFrame.toBuffer({
'resize': {
'width': w,
'height': h,
},
'crop': {
'left': ow,
'top': oh,
'width': sw,
'height': sh,
},
'format': videoFrame.format or 'rgb',
})
)
first, second = await asyncio.gather(
ensureRGBData(firstData, (w, h), videoFrame.format),
ensureRGBData(secondData, (w, h), videoFrame.format)
)
def cvss1(point):
return point[0] / s, point[1] / s
def cvss2(point):
return point[0] / s + ow, point[1] / s + oh
ret1 = await self.detect_once(first, settings, src_size, cvss1)
first.close()
ret2 = await self.detect_once(second, settings, src_size, cvss2)
second.close()
two_intersect = intersect_rect(Rectangle(*first_crop), Rectangle(*second_crop))
def is_same_detection_middle(d1: ObjectDetectionResult, d2: ObjectDetectionResult):
same, ret = is_same_detection(d1, d2)
if same:
return same, ret
if d1['className'] != d2['className']:
return False, None
r1 = from_bounding_box(d1['boundingBox'])
m1 = intersect_rect(two_intersect, r1)
if not m1:
return False, None
r2 = from_bounding_box(d2['boundingBox'])
m2 = intersect_rect(two_intersect, r2)
if not m2:
return False, None
same, ret = is_same_box(to_bounding_box(m1), to_bounding_box(m2))
if not same:
return False, None
c = to_bounding_box(combine_rect(r1, r2))
return True, c
ret = ret1
ret['detections'] = dedupe_detections(ret1['detections'] + ret2['detections'], is_same_detection=is_same_detection_middle)
return ret
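
For illustration: run_detection_videoframe covers a wide frame with two overlapping crops, so an object straddling the seam can be detected twice; dedupe_detections then merges same-class boxes whose mutual overlap exceeds a threshold into one enclosing box. A small self-contained sketch of that merge rule (boxes are (x, y, width, height); the sample values are made up):

def merge_if_same_box(bb1, bb2, threshold=0.7):
    # overlap area, as in is_same_box above
    x1 = max(bb1[0], bb2[0])
    y1 = max(bb1[1], bb2[1])
    x2 = min(bb1[0] + bb1[2], bb2[0] + bb2[2])
    y2 = min(bb1[1] + bb1[3], bb2[1] + bb2[3])
    if x2 <= x1 or y2 <= y1:
        return None  # disjoint boxes
    ia = (x2 - x1) * (y2 - y1)
    # if the overlap is small relative to either box, keep them separate
    if ia / (bb1[2] * bb1[3]) < threshold or ia / (bb2[2] * bb2[3]) < threshold:
        return None
    left = min(bb1[0], bb2[0])
    top = min(bb1[1], bb2[1])
    right = max(bb1[0] + bb1[2], bb2[0] + bb2[2])
    bottom = max(bb1[1] + bb1[3], bb2[1] + bb2[3])
    return (left, top, right - left, bottom - top)

# the same person reported by the left and right crop passes
print(merge_if_same_box((100, 100, 50, 80), (110, 105, 50, 80)))  # (100, 100, 60, 85)
print(merge_if_same_box((0, 0, 50, 50), (200, 200, 50, 50)))      # None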

View File

@@ -1,27 +0,0 @@
from collections import namedtuple
Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
def intersect_rect(a: Rectangle, b: Rectangle):
x1 = max(min(a.xmin, a.xmax), min(b.xmin, b.xmax))
y1 = max(min(a.ymin, a.ymax), min(b.ymin, b.ymax))
x2 = min(max(a.xmin, a.xmax), max(b.xmin, b.xmax))
y2 = min(max(a.ymin, a.ymax), max(b.ymin, b.ymax))
if x1<x2 and y1<y2:
return Rectangle(x1, y1, x2, y2)
def combine_rect(a: Rectangle, b: Rectangle):
return Rectangle(min(a.xmin, b.xmin), min(a.ymin, b.ymin), max(a.xmax, b.xmax), max(a.ymax, b.ymax))
def intersect_area(a: Rectangle, b: Rectangle):
intersect = intersect_rect(a, b)
if intersect:
dx = intersect.xmax - intersect.xmin
dy = intersect.ymax - intersect.ymin
return dx * dy
def to_bounding_box(rect: Rectangle):
return (rect.xmin, rect.ymin, rect.xmax - rect.xmin, rect.ymax - rect.ymin)
def from_bounding_box(bb):
return Rectangle(bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3])
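
A quick numeric check of the helpers above, assuming the definitions in this file are in scope; Rectangle stores (xmin, ymin, xmax, ymax) corners, while bounding boxes are (x, y, width, height):

a = Rectangle(0, 0, 10, 10)
b = Rectangle(5, 5, 20, 20)
print(intersect_rect(a, b))  # Rectangle(xmin=5, ymin=5, xmax=10, ymax=10)
print(intersect_area(a, b))  # 25 (a 5 x 5 overlap)
print(combine_rect(a, b))    # Rectangle(xmin=0, ymin=0, xmax=20, ymax=20)
print(from_bounding_box((5, 5, 15, 15)))  # Rectangle(xmin=5, ymin=5, xmax=20, ymax=20)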

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/tensorflow-lite",
"version": "0.1.13",
"version": "0.1.14",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/tensorflow-lite",
"version": "0.1.13",
"version": "0.1.14",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -43,5 +43,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.13"
"version": "0.1.14"
}

View File

@@ -200,7 +200,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings):
# image is already correct aspect ratio, so it can be processed in a single pass.
if input_aspect_ratio == src_aspect_ratio:
def cvss(point):
-return point[0], point[1]
+return point[0] / s, point[1] / s
# aspect ratio matches, but image must be scaled.
resize = None
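
This one-line fix matters because detect_once returns boxes in model-input space, while cvss must map them back to source-frame space by undoing the scale factor s = max(w / iw, h / ih) computed earlier in the method. A worked check with assumed sizes (320x320 model input, 1280x1280 source frame):

w, h = 320, 320      # model input size (assumed for illustration)
iw, ih = 1280, 1280  # source frame size (assumed)
s = max(w / iw, h / ih)  # 0.25: the frame is downscaled 4x for inference

def cvss(point):
    # fixed version: divide by s to return to source-frame coordinates
    return point[0] / s, point[1] / s

print(cvss((80, 60)))  # (320.0, 240.0)
# the pre-fix version returned (80, 60) unchanged, leaving every
# detection box at quarter scale near the frame's top-left corner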

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/tensorflow-lite",
"version": "0.1.13",
"version": "0.1.14",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/tensorflow-lite",
"version": "0.1.13",
"version": "0.1.14",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -40,5 +40,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.13"
"version": "0.1.14"
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/server",
"version": "0.7.92",
"version": "0.7.95",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/server",
"version": "0.7.92",
"version": "0.7.95",
"license": "ISC",
"dependencies": {
"@mapbox/node-pre-gyp": "^1.0.10",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/server",
"version": "0.7.93",
"version": "0.7.95",
"description": "",
"dependencies": {
"@mapbox/node-pre-gyp": "^1.0.10",
@@ -72,7 +72,7 @@
"beta": "npm publish --tag beta",
"postbeta": "npm version patch && git add package.json && npm run build && git commit -m postbeta",
"release": "npm publish",
"prepublish": "npm run build",
"prepublishOnly": "npm run build",
"postrelease": "git tag v$npm_package_version && git push origin v$npm_package_version && npm version patch && git add package.json && git commit -m postrelease",
"docker": "scripts/github-workflow-publish-docker.sh"
},

View File

@@ -147,7 +147,9 @@ export class PluginHostAPI extends PluginAPIManagedListeners implements PluginAP
}
async onDeviceDiscovered(device: Device) {
-return this.pluginHost.upsertDevice(device);
+const id = await this.pluginHost.upsertDevice(device);
+this.scrypted.getDevice(id)?.probe().catch(() => { });
+return id;
}
async onDeviceRemoved(nativeId: string) {