Compare commits

..

12 Commits

Author SHA1 Message Date
Koushik Dutta
4246e3c476 server: filter link local addresses 2023-04-26 22:47:33 -07:00
Koushik Dutta
3fce0838f1 Merge branch 'main' of github.com:koush/scrypted 2023-04-26 18:40:27 -07:00
Koushik Dutta
2609e301fe python-codecs: fix gray conversion 2023-04-26 18:40:22 -07:00
Koushik Dutta
f4737bf2ac docker: fix stupid bash/zsh issue 2023-04-26 10:22:55 -07:00
Koushik Dutta
fc102aa526 postbeta 2023-04-26 09:56:27 -07:00
Koushik Dutta
9ef33e156f docker: pass through /dev/dri in compose 2023-04-26 09:40:07 -07:00
Koushik Dutta
881865a0cb docker: add intel opencl driver 2023-04-26 09:22:16 -07:00
Koushik Dutta
be5643cc53 openvino: fix bufferconvertor 2023-04-25 22:35:41 -07:00
Koushik Dutta
7e6eba1596 openvino: initial release 2023-04-25 21:56:07 -07:00
Koushik Dutta
27dde776a6 rebroadcast: further settings cleanups 2023-04-25 18:46:38 -07:00
Koushik Dutta
b24159a22a rebroadcast: strip out legacy containers 2023-04-25 18:32:11 -07:00
Koushik Dutta
b6c242b9d5 postrelease 2023-04-25 14:11:58 -07:00
27 changed files with 950 additions and 278 deletions

View File

@@ -24,6 +24,13 @@ RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
RUN apt-get -y update
RUN apt-get -y install libedgetpu1-std
# intel opencl gpu for openvino
RUN if [ "$(uname -m)" = "x86_64" ]; \
then \
apt-get -y install \
intel-opencl-icd; \
fi
RUN apt-get -y install software-properties-common apt-utils
RUN apt-get -y update
RUN apt-get -y upgrade

View File

@@ -32,14 +32,14 @@ services:
restart: unless-stopped
network_mode: host
# uncomment this and a line below as needed.
# devices:
# zwave usb serial device
# - /dev/ttyACM0:/dev/ttyACM0
# all usb devices, such as coral tpu
# - /dev/bus/usb:/dev/bus/usb
# intel hardware accelerated video decoding
# - /dev/dri:/dev/dri
devices:
# hardware accelerated video decoding, opencl, etc.
- /dev/dri:/dev/dri
# uncomment below as necessary.
# zwave usb serial device
# - /dev/ttyACM0:/dev/ttyACM0
# all usb devices, such as coral tpu
# - /dev/bus/usb:/dev/bus/usb
volumes:
- ~/.scrypted/volume:/server/volume

View File

@@ -21,6 +21,13 @@ RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
RUN apt-get -y update
RUN apt-get -y install libedgetpu1-std
# intel opencl gpu for openvino
RUN if [ "$(uname -m)" = "x86_64" ]; \
then \
apt-get -y install \
intel-opencl-icd; \
fi
RUN apt-get -y install software-properties-common apt-utils
RUN apt-get -y update
RUN apt-get -y upgrade

6
plugins/openvino/.gitignore vendored Normal file
View File

@@ -0,0 +1,6 @@
.DS_Store
out/
node_modules/
dist/
.venv
all_models*

View File

@@ -0,0 +1,15 @@
.DS_Store
out/
node_modules/
*.map
fs
src
.vscode
dist/*.js
dist/*.txt
__pycache__
all_models
sort_oh
download_models.sh
tsconfig.json
.venv

30
plugins/openvino/.vscode/launch.json vendored Normal file
View File

@@ -0,0 +1,30 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "Scrypted Debugger",
"type": "python",
"request": "attach",
"connect": {
"host": "${config:scrypted.debugHost}",
"port": 10081
},
"justMyCode": false,
"preLaunchTask": "scrypted: deploy+debug",
"pathMappings": [
{
"localRoot": "/Volumes/Dev/scrypted/server/python/",
"remoteRoot": "/Volumes/Dev/scrypted/server/python/",
},
{
"localRoot": "${workspaceFolder}/src",
"remoteRoot": "${config:scrypted.pythonRemoteRoot}"
},
]
}
]
}

21
plugins/openvino/.vscode/settings.json vendored Normal file
View File

@@ -0,0 +1,21 @@
{
// docker installation
// "scrypted.debugHost": "koushik-ubuntu",
// "scrypted.serverRoot": "/server",
// pi local installation
// "scrypted.debugHost": "192.168.2.119",
// "scrypted.serverRoot": "/home/pi/.scrypted",
// local checkout
"scrypted.debugHost": "127.0.0.1",
"scrypted.serverRoot": "/Users/koush/.scrypted",
// "scrypted.debugHost": "koushik-windows",
// "scrypted.serverRoot": "C:\\Users\\koush\\.scrypted",
"scrypted.pythonRemoteRoot": "${config:scrypted.serverRoot}/volume/plugin.zip",
"python.analysis.extraPaths": [
"./node_modules/@scrypted/sdk/types/scrypted_python"
]
}

20
plugins/openvino/.vscode/tasks.json vendored Normal file
View File

@@ -0,0 +1,20 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "scrypted: deploy+debug",
"type": "shell",
"presentation": {
"echo": true,
"reveal": "silent",
"focus": false,
"panel": "shared",
"showReuseMessage": true,
"clear": false
},
"command": "npm run scrypted-vscode-launch ${config:scrypted.debugHost}",
},
]
}

View File

@@ -0,0 +1,6 @@
# OpenVINO Object Detection for Scrypted
This plugin adds object detection capabilities to any camera in Scrypted. Having a fast GPU and CPU is highly recommended.
The OpenVINO Plugin should only be used if you are a Scrypted NVR user; it provides no
benefit to HomeKit, which performs its own detection processing.

86
plugins/openvino/package-lock.json generated Normal file
View File

@@ -0,0 +1,86 @@
{
"name": "@scrypted/openvino",
"version": "0.1.15",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/openvino",
"version": "0.1.15",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}
},
"../../sdk": {
"name": "@scrypted/sdk",
"version": "0.2.97",
"dev": true,
"license": "ISC",
"dependencies": {
"@babel/preset-typescript": "^7.18.6",
"adm-zip": "^0.4.13",
"axios": "^0.21.4",
"babel-loader": "^9.1.0",
"babel-plugin-const-enum": "^1.1.0",
"esbuild": "^0.15.9",
"ncp": "^2.0.0",
"raw-loader": "^4.0.2",
"rimraf": "^3.0.2",
"tmp": "^0.2.1",
"ts-loader": "^9.4.2",
"typescript": "^4.9.4",
"webpack": "^5.75.0",
"webpack-bundle-analyzer": "^4.5.0"
},
"bin": {
"scrypted-changelog": "bin/scrypted-changelog.js",
"scrypted-debug": "bin/scrypted-debug.js",
"scrypted-deploy": "bin/scrypted-deploy.js",
"scrypted-deploy-debug": "bin/scrypted-deploy-debug.js",
"scrypted-package-json": "bin/scrypted-package-json.js",
"scrypted-setup-project": "bin/scrypted-setup-project.js",
"scrypted-webpack": "bin/scrypted-webpack.js"
},
"devDependencies": {
"@types/node": "^18.11.18",
"@types/stringify-object": "^4.0.0",
"stringify-object": "^3.3.0",
"ts-node": "^10.4.0",
"typedoc": "^0.23.21"
}
},
"../sdk": {
"extraneous": true
},
"node_modules/@scrypted/sdk": {
"resolved": "../../sdk",
"link": true
}
},
"dependencies": {
"@scrypted/sdk": {
"version": "file:../../sdk",
"requires": {
"@babel/preset-typescript": "^7.18.6",
"@types/node": "^18.11.18",
"@types/stringify-object": "^4.0.0",
"adm-zip": "^0.4.13",
"axios": "^0.21.4",
"babel-loader": "^9.1.0",
"babel-plugin-const-enum": "^1.1.0",
"esbuild": "^0.15.9",
"ncp": "^2.0.0",
"raw-loader": "^4.0.2",
"rimraf": "^3.0.2",
"stringify-object": "^3.3.0",
"tmp": "^0.2.1",
"ts-loader": "^9.4.2",
"ts-node": "^10.4.0",
"typedoc": "^0.23.21",
"typescript": "^4.9.4",
"webpack": "^5.75.0",
"webpack-bundle-analyzer": "^4.5.0"
}
}
}
}

View File

@@ -0,0 +1,43 @@
{
"name": "@scrypted/openvino",
"description": "Scrypted OpenVINO Object Detection",
"keywords": [
"scrypted",
"plugin",
"openvino",
"motion",
"object",
"detect",
"detection",
"people",
"person"
],
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",
"prescrypted-setup-project": "scrypted-package-json",
"build": "scrypted-webpack",
"prepublishOnly": "NODE_ENV=production scrypted-webpack",
"prescrypted-vscode-launch": "scrypted-webpack",
"scrypted-vscode-launch": "scrypted-deploy-debug",
"scrypted-deploy-debug": "scrypted-deploy-debug",
"scrypted-debug": "scrypted-debug",
"scrypted-deploy": "scrypted-deploy",
"scrypted-readme": "scrypted-readme",
"scrypted-package-json": "scrypted-package-json"
},
"scrypted": {
"name": "OpenVINO Object Detection",
"pluginDependencies": [
"@scrypted/objectdetector"
],
"runtime": "python",
"type": "API",
"interfaces": [
"ObjectDetection"
]
},
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.15"
}

View File

@@ -0,0 +1,77 @@
from __future__ import annotations
import asyncio
from typing import Any, Tuple
import scrypted_sdk
from scrypted_sdk.types import (MediaObject, ObjectDetection,
ObjectDetectionGeneratorSession,
ObjectDetectionModel, ObjectDetectionSession,
ObjectsDetected, ScryptedMimeTypes, Setting)
class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
    """Abstract base for object-detection plugins.

    Subclasses implement the model-specific hooks (classes, input details,
    per-frame detection); this base wires them into the scrypted
    ObjectDetection interface (model description, generator and single-shot
    detection entry points).
    """

    def __init__(self, nativeId: str | None = None):
        super().__init__(nativeId=nativeId)
        # event loop captured at construction time for later scheduling
        self.loop = asyncio.get_event_loop()

    def getClasses(self) -> list[str]:
        # subclass hook: list of class names the model can detect
        pass

    def getTriggerClasses(self) -> list[str]:
        # subclass hook: class names that should trigger detection
        pass

    def get_input_details(self) -> Tuple[int, int, int]:
        # subclass hook: model input as (width, height, channels)
        pass

    def get_input_format(self) -> str:
        # subclass hook: expected pixel format of the model input
        pass

    def getModelSettings(self, settings: Any = None) -> list[Setting]:
        # subclass hook: extra model-specific settings; none by default
        return []

    async def getDetectionModel(self, settings: Any = None) -> ObjectDetectionModel:
        """Describe this model (name, classes, input shape/format, settings)."""
        d: ObjectDetectionModel = {
            'name': self.pluginId,
            'classes': self.getClasses(),
            'triggerClasses': self.getTriggerClasses(),
            'inputSize': self.get_input_details(),
            'inputFormat': self.get_input_format(),
            'settings': [],
        }
        d['settings'] += self.getModelSettings(settings)
        return d

    def get_detection_input_size(self, src_size):
        # subclass hook: preferred input size for a given source size
        pass

    async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: ObjectDetectionSession) -> ObjectsDetected:
        # subclass hook: run detection on one video frame
        pass

    async def generateObjectDetections(self, videoFrames: Any, session: ObjectDetectionGeneratorSession = None) -> Any:
        """Async generator: run detection on each incoming frame and yield
        (detections, frame) pairs. The frame source is always closed on exit."""
        try:
            # resolve the RPC proxy before iterating
            videoFrames = await scrypted_sdk.sdk.connectRPCObject(videoFrames)
            async for videoFrame in videoFrames:
                videoFrame = await scrypted_sdk.sdk.connectRPCObject(videoFrame)
                detected = await self.run_detection_videoframe(videoFrame, session)
                yield {
                    '__json_copy_serialize_children': True,
                    'detected': detected,
                    'videoFrame': videoFrame,
                }
        finally:
            # best effort: close the upstream frame generator even on error
            try:
                await videoFrames.aclose()
            except:
                pass

    async def detectObjects(self, mediaObject: MediaObject, session: ObjectDetectionSession = None) -> ObjectsDetected:
        """Single-shot detection on a media object.

        Image media objects are used directly (via RPC proxy); anything else
        is converted to an image buffer first.
        """
        vf: scrypted_sdk.VideoFrame
        if mediaObject and mediaObject.mimeType == ScryptedMimeTypes.Image.value:
            vf = await scrypted_sdk.sdk.connectRPCObject(mediaObject)
        else:
            vf = await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, ScryptedMimeTypes.Image.value)
        return await self.run_detection_videoframe(vf, session)

View File

@@ -0,0 +1,24 @@
import threading
import asyncio
async def run_coro_threadsafe(coro, other_loop, our_loop = None):
    """Schedule coro on other_loop and await its result from the current loop.

    Bridges two event loops: the coroutine runs on other_loop while the
    awaiting loop (our_loop, or the current one) stays responsive.
    """
    current_loop = our_loop or asyncio.get_event_loop()
    # run_coroutine_threadsafe hands back a concurrent.futures.Future
    # (requires Python 3.5.1+).
    pending = asyncio.run_coroutine_threadsafe(coro, other_loop)
    # Flip a threading.Event once the future settles...
    done_signal = threading.Event()
    pending.add_done_callback(lambda _: done_signal.set())
    # ...and block on it in an executor, yielding control to current_loop.
    await current_loop.run_in_executor(None, done_signal.wait)
    # the future has settled, so result() will not block (it may raise).
    return pending.result()

View File

@@ -0,0 +1,4 @@
from ov import OpenVINOPlugin
def create_scrypted_plugin():
    # Scrypted plugin entry point: return the plugin's root device instance.
    return OpenVINOPlugin()

View File

@@ -0,0 +1,102 @@
from __future__ import annotations
import asyncio
import concurrent.futures
import os
import re
from typing import Any, Tuple
import openvino.runtime as ov
import scrypted_sdk
from PIL import Image
from scrypted_sdk.types import Setting
from predict import PredictPlugin, Prediction, Rectangle
import numpy as np
def parse_label_contents(contents: str):
    """Parse a label file into a {index: label} dict.

    Each line is either "<index><colon-or-whitespace><label>" (explicit
    numeric index) or a bare label, in which case the zero-based line
    number becomes the index.
    """
    labels = {}
    for line_number, raw_line in enumerate(contents.splitlines()):
        text = raw_line.strip()
        parts = re.split(r'[:\s]+', text, maxsplit=1)
        if len(parts) == 2 and parts[0].strip().isdigit():
            labels[int(parts[0])] = parts[1].strip()
        else:
            labels[line_number] = text
    return labels
class OpenVINOPlugin(PredictPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings):
    """Object detection plugin backed by an OpenVINO SSD MobileNet v1 COCO model.

    Downloads the IR model files on first run, compiles the model with the
    "AUTO" device (OpenVINO picks GPU/CPU), and runs inference off the asyncio
    thread on a single-worker executor.
    """

    def __init__(self, nativeId: str | None = None):
        super().__init__(nativeId=nativeId)

        self.core = ov.Core()
        available_devices = self.core.available_devices
        print('available devices: %s' % available_devices)

        # The .xml (topology), .mapping, and .bin (weights) files are cached
        # side by side; OpenVINO locates the weights next to the .xml when
        # compiling.
        xmlFile = self.downloadFile('https://raw.githubusercontent.com/koush/openvino-models/main/ssd_mobilenet_v1_coco/FP16/ssd_mobilenet_v1_coco.xml', 'ssd_mobilenet_v1_coco.xml')
        mappingFile = self.downloadFile('https://raw.githubusercontent.com/koush/openvino-models/main/ssd_mobilenet_v1_coco/FP16/ssd_mobilenet_v1_coco.mapping', 'ssd_mobilenet_v1_coco.mapping')
        # fix: this is the model weights file, not the labels file; it was
        # previously assigned to a misleading (and later clobbered) labelsFile.
        binFile = self.downloadFile('https://raw.githubusercontent.com/koush/openvino-models/main/ssd_mobilenet_v1_coco/FP16/ssd_mobilenet_v1_coco.bin', 'ssd_mobilenet_v1_coco.bin')

        self.compiled_model = self.core.compile_model(xmlFile, "AUTO")

        labelsFile = self.downloadFile('https://raw.githubusercontent.com/google-coral/test_data/master/coco_labels.txt', 'coco_labels.txt')
        # close the file handle deterministically (was previously leaked)
        with open(labelsFile, 'r') as f:
            labels_contents = f.read()
        self.labels = parse_label_contents(labels_contents)
        # single worker: serialize inference requests on one thread
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1, thread_name_prefix="openvino", )

    async def getSettings(self) -> list[Setting]:
        # no user-configurable settings yet
        return []

    # width, height, channels
    def get_input_details(self) -> Tuple[int, int, int]:
        return [300, 300, 3]

    def get_input_size(self) -> Tuple[int, int]:
        return [300, 300]

    async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss):
        """Run one inference pass on a 300x300 RGB image and return detections
        converted to the plugin's standard result format."""
        def predict():
            infer_request = self.compiled_model.create_infer_request()
            # shared_memory avoids copying the frame into the tensor
            input_tensor = ov.Tensor(array=np.expand_dims(np.array(input), axis=0), shared_memory=True)
            # Set input tensor for model with one input
            infer_request.set_input_tensor(input_tensor)
            infer_request.start_async()
            infer_request.wait()
            output = infer_request.get_output_tensor()

            objs = []
            # SSD output rows look like [valid, class, confidence, l, t, r, b];
            # a leading -1 marks the end of valid detections.
            for values in output.data[0][0].astype(float):
                valid, index, confidence, l, t, r, b = values
                if valid == -1:
                    break

                def torelative(value: float):
                    # presumably normalized 0-1 coords scaled to the 300px
                    # model input -- TODO confirm against model output spec.
                    return value * 300

                l = torelative(l)
                t = torelative(t)
                r = torelative(r)
                b = torelative(b)

                # index - 1: model class ids appear to be 1-based while the
                # labels dict is 0-based -- verify against coco_labels.txt.
                obj = Prediction(index - 1, confidence, Rectangle(
                    l,
                    t,
                    r,
                    b
                ))
                objs.append(obj)

            return objs

        try:
            # inference is blocking; run it off the asyncio thread
            objs = await asyncio.get_event_loop().run_in_executor(self.executor, predict)
        except:
            import traceback
            traceback.print_exc()
            raise

        ret = self.create_detection_result(objs, src_size, cvss)
        return ret

View File

@@ -0,0 +1,298 @@
from __future__ import annotations
import asyncio
import concurrent.futures
import os
import re
import urllib.request
from typing import Any, List, Tuple
import scrypted_sdk
from PIL import Image
from scrypted_sdk.types import (ObjectDetectionResult, ObjectDetectionSession,
ObjectsDetected, Setting)
from detect import DetectPlugin
from .rectangle import (Rectangle, combine_rect, from_bounding_box,
intersect_area, intersect_rect, to_bounding_box)
# vips is already multithreaded, but needs to be kicked off the python asyncio thread.
toThreadExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="image")
async def to_thread(f):
    """Run callable f on the shared image thread pool and await its result."""
    # get_running_loop is safe here: this is always called from a coroutine.
    running = asyncio.get_running_loop()
    result = await running.run_in_executor(toThreadExecutor, f)
    return result
async def ensureRGBData(data: bytes, size: Tuple[int, int], format: str):
    """Wrap raw pixel data in an RGB PIL Image, converting from RGBA if needed.

    Non-rgba data is wrapped directly as packed RGB. The RGBA->RGB conversion
    copies pixels, so it is pushed onto the image thread pool.
    """
    if format == 'rgba':
        def convert():
            rgba = Image.frombuffer('RGBA', size, data)
            try:
                return rgba.convert('RGB')
            finally:
                # release the intermediate RGBA image promptly
                rgba.close()

        return await to_thread(convert)

    return Image.frombuffer('RGB', size, data)
def parse_label_contents(contents: str):
    """Map label-file lines to {numeric index: label}.

    Lines of the form "<digits><colon/whitespace><label>" use the explicit
    index; all other lines fall back to their zero-based line number.
    """
    mapping = {}
    for fallback_index, line in enumerate(contents.splitlines()):
        stripped = line.strip()
        fields = re.split(r'[:\s]+', stripped, maxsplit=1)
        if len(fields) == 2 and fields[0].strip().isdigit():
            mapping[int(fields[0])] = fields[1].strip()
        else:
            mapping[fallback_index] = stripped
    return mapping
def is_same_box(bb1, bb2, threshold = .7):
    """Decide whether two (x, y, w, h) boxes represent the same detection.

    Returns (True, union_box) when the overlap covers at least `threshold`
    of each box's area; otherwise (False, None). The union box is also in
    (x, y, w, h) form.
    """
    overlap = intersect_area(from_bounding_box(bb1), from_bounding_box(bb2))
    if not overlap:
        return False, None

    area1 = bb1[2] * bb1[3]
    area2 = bb2[2] * bb2[3]
    # if area intersect area is too small, these are different boxes
    if overlap / area1 < threshold or overlap / area2 < threshold:
        return False, None

    left = min(bb1[0], bb2[0])
    top = min(bb1[1], bb2[1])
    right = max(bb1[0] + bb1[2], bb2[0] + bb2[2])
    bottom = max(bb1[1] + bb1[3], bb2[1] + bb2[3])
    return True, (left, top, right - left, bottom - top)
def is_same_detection(d1: ObjectDetectionResult, d2: ObjectDetectionResult):
    """Same-class wrapper around is_same_box; different classes never match."""
    if d1['className'] == d2['className']:
        return is_same_box(d1['boundingBox'], d2['boundingBox'])
    return False, None
def dedupe_detections(input: List[ObjectDetectionResult], is_same_detection = is_same_detection):
    """Merge overlapping same-class detections into single larger boxes.

    Each pending detection is compared against the accepted list; a match is
    merged (union box, max score) and re-queued so the enlarged box can absorb
    further overlaps. The input list is not modified, but matched detection
    dicts are updated in place.
    """
    pending = input.copy()
    accepted = []
    while len(pending):
        candidate = pending.pop()
        merged = False
        for existing in accepted:
            same, union_box = is_same_detection(candidate, existing)
            if not same:
                continue
            # grow the candidate to cover both boxes, keeping the best score
            candidate['boundingBox'] = union_box
            candidate['score'] = max(candidate['score'], existing['score'])
            # drop the matched entry (and any value-equal duplicates)
            accepted = [r for r in accepted if r != existing]
            # run dedupe again with this new larger item
            pending.append(candidate)
            merged = True
            break
        if not merged:
            accepted.append(candidate)
    return accepted
class Prediction:
    """A single raw model detection: class id, confidence score, and bbox."""

    def __init__(self, id: int, score: float, bbox: Tuple[float, float, float, float]):
        # plain data holder; attributes mirror the constructor arguments
        self.id, self.score, self.bbox = id, score, bbox
class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Settings):
    """Shared base for prediction plugins.

    Provides model-file downloading/caching, label lookup, conversion of raw
    predictions into scrypted detection results, and a frame pipeline that
    scans non-matching aspect ratios in two overlapping passes.
    """

    # class id -> class name mapping, populated by the concrete subclass
    labels: dict

    def downloadFile(self, url: str, filename: str):
        """Download url into the plugin volume's files/ dir and return the path.

        Cached: returns immediately when the file already exists. The download
        goes to a .tmp file and is renamed, so a partial download is never
        mistaken for a complete file.
        """
        filesPath = os.path.join(os.environ['SCRYPTED_PLUGIN_VOLUME'], 'files')
        fullpath = os.path.join(filesPath, filename)
        if os.path.isfile(fullpath):
            return fullpath
        os.makedirs(filesPath, exist_ok=True)
        tmp = fullpath + '.tmp'
        urllib.request.urlretrieve(url, tmp)
        os.rename(tmp, fullpath)
        return fullpath

    def getClasses(self) -> list[str]:
        # all class names known to the loaded model
        return list(self.labels.values())

    def getTriggerClasses(self) -> list[str]:
        # detection is triggered by motion events
        return ['motion']

    def requestRestart(self):
        # fire-and-forget: schedule a plugin restart on the event loop
        asyncio.ensure_future(scrypted_sdk.deviceManager.requestRestart())

    # width, height, channels
    def get_input_details(self) -> Tuple[int, int, int]:
        # subclass hook
        pass

    def getModelSettings(self, settings: Any = None) -> list[Setting]:
        # subclass hook: extra model-specific settings; none by default
        return []

    def create_detection_result(self, objs: List[Prediction], size, convert_to_src_size=None) -> ObjectsDetected:
        """Convert raw Prediction objects into an ObjectsDetected result.

        Bounding boxes are converted from corner (xmin/ymin/xmax/ymax) form to
        (x, y, w, h). If convert_to_src_size is given, points are additionally
        mapped from model input coordinates back to source coordinates.
        """
        detections: List[ObjectDetectionResult] = []
        detection_result: ObjectsDetected = {}
        detection_result['detections'] = detections
        detection_result['inputDimensions'] = size

        for obj in objs:
            # fall back to the numeric id when the label is unknown
            className = self.labels.get(obj.id, obj.id)
            detection: ObjectDetectionResult = {}
            detection['boundingBox'] = (
                obj.bbox.xmin, obj.bbox.ymin, obj.bbox.xmax - obj.bbox.xmin, obj.bbox.ymax - obj.bbox.ymin)
            detection['className'] = className
            detection['score'] = obj.score
            detections.append(detection)

        if convert_to_src_size:
            # rebuild the detections list with source-coordinate boxes
            detections = detection_result['detections']
            detection_result['detections'] = []
            for detection in detections:
                bb = detection['boundingBox']
                x, y = convert_to_src_size((bb[0], bb[1]))
                x2, y2 = convert_to_src_size(
                    (bb[0] + bb[2], bb[1] + bb[3]))
                detection['boundingBox'] = (x, y, x2 - x + 1, y2 - y + 1)
                detection_result['detections'].append(detection)

        # print(detection_result)
        return detection_result

    def get_detection_input_size(self, src_size):
        # signals to pipeline that any input size is fine
        # previous code used to resize to correct size and run detection that way.
        # new code will resize the frame and potentially do multiple passes.
        # this is useful for high quality thumbnails.
        return (None, None)

    def get_input_size(self) -> Tuple[int, int]:
        # subclass hook: model input (width, height)
        pass

    async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
        # subclass hook: run one inference pass on a prepared image
        pass

    async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: ObjectDetectionSession) -> ObjectsDetected:
        """Run detection on a frame.

        When the frame's aspect ratio matches the model input, a single scaled
        pass is used. Otherwise the frame is scanned in two overlapping crops
        (top-left and bottom-right) and the results are deduped, merging
        detections that straddle the overlap region.
        """
        settings = detection_session and detection_session.get('settings')

        src_size = videoFrame.width, videoFrame.height
        w, h = self.get_input_size()
        input_aspect_ratio = w / h

        iw, ih = src_size
        src_aspect_ratio = iw / ih

        # s is the larger of the width/height scale factors (model / source)
        ws = w / iw
        hs = h / ih
        s = max(ws, hs)

        # image is already correct aspect ratio, so it can be processed in a single pass.
        if input_aspect_ratio == src_aspect_ratio:
            def cvss(point):
                # map model-input coordinates back to source coordinates
                return point[0] / s, point[1] / s

            # aspect ratio matches, but image must be scaled.
            resize = None
            # NOTE(review): this compares source height to model *width*;
            # presumably `iw != w` (or `ih != h`) was intended -- confirm.
            if ih != w:
                resize = {
                    'width': w,
                    'height': h,
                }

            data = await videoFrame.toBuffer({
                'resize': resize,
                'format': videoFrame.format or 'rgb',
            })
            image = await ensureRGBData(data, (w, h), videoFrame.format)
            try:
                ret = await self.detect_once(image, settings, src_size, cvss)
                return ret
            finally:
                # release the PIL image even if detection raises
                image.close()

        # two-pass scan: crop size in source coordinates
        sw = int(w / s)
        sh = int(h / s)
        first_crop = (0, 0, sw, sh)
        # second crop is anchored at the opposite corner; the crops overlap
        ow = iw - sw
        oh = ih - sh
        second_crop = (ow, oh, ow + sw, oh + sh)

        # fetch both crops in parallel, each resized to the model input
        firstData, secondData = await asyncio.gather(
            videoFrame.toBuffer({
                'resize': {
                    'width': w,
                    'height': h,
                },
                'crop': {
                    'left': 0,
                    'top': 0,
                    'width': sw,
                    'height': sh,
                },
                'format': videoFrame.format or 'rgb',
            }),
            videoFrame.toBuffer({
                'resize': {
                    'width': w,
                    'height': h,
                },
                'crop': {
                    'left': ow,
                    'top': oh,
                    'width': sw,
                    'height': sh,
                },
                'format': videoFrame.format or 'rgb',
            })
        )

        first, second = await asyncio.gather(
            ensureRGBData(firstData, (w, h), videoFrame.format),
            ensureRGBData(secondData, (w, h), videoFrame.format)
        )

        # coordinate converters for each crop's detections
        def cvss1(point):
            return point[0] / s, point[1] / s

        def cvss2(point):
            return point[0] / s + ow, point[1] / s + oh

        ret1 = await self.detect_once(first, settings, src_size, cvss1)
        first.close()
        ret2 = await self.detect_once(second, settings, src_size, cvss2)
        second.close()

        # region (in source coordinates) covered by both crops
        two_intersect = intersect_rect(Rectangle(*first_crop), Rectangle(*second_crop))

        def is_same_detection_middle(d1: ObjectDetectionResult, d2: ObjectDetectionResult):
            # like is_same_detection, but also matches detections whose
            # portions *within the overlap region* are the same box -- i.e.
            # one object split across the two crops.
            same, ret = is_same_detection(d1, d2)
            if same:
                return same, ret

            if d1['className'] != d2['className']:
                return False, None

            r1 = from_bounding_box(d1['boundingBox'])
            m1 = intersect_rect(two_intersect, r1)
            if not m1:
                return False, None

            r2 = from_bounding_box(d2['boundingBox'])
            m2 = intersect_rect(two_intersect, r2)
            if not m2:
                return False, None

            same, ret = is_same_box(to_bounding_box(m1), to_bounding_box(m2))
            if not same:
                return False, None

            # merged box spans both original detections
            c = to_bounding_box(combine_rect(r1, r2))
            return True, c

        ret = ret1
        ret['detections'] = dedupe_detections(ret1['detections'] + ret2['detections'], is_same_detection=is_same_detection_middle)
        return ret

View File

@@ -0,0 +1,27 @@
from collections import namedtuple
# Axis-aligned rectangle expressed by its min/max corners.
Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')

def intersect_rect(a: Rectangle, b: Rectangle):
    """Return the overlapping Rectangle of a and b, or None when disjoint."""
    left = max(min(a.xmin, a.xmax), min(b.xmin, b.xmax))
    top = max(min(a.ymin, a.ymax), min(b.ymin, b.ymax))
    right = min(max(a.xmin, a.xmax), max(b.xmin, b.xmax))
    bottom = min(max(a.ymin, a.ymax), max(b.ymin, b.ymax))
    # only a positive-area overlap counts; touching edges yield None
    if left < right and top < bottom:
        return Rectangle(left, top, right, bottom)

def combine_rect(a: Rectangle, b: Rectangle):
    """Return the smallest Rectangle enclosing both a and b."""
    return Rectangle(
        min(a.xmin, b.xmin),
        min(a.ymin, b.ymin),
        max(a.xmax, b.xmax),
        max(a.ymax, b.ymax),
    )

def intersect_area(a: Rectangle, b: Rectangle):
    """Return the overlap area of a and b, or None when they are disjoint."""
    overlap = intersect_rect(a, b)
    if overlap:
        return (overlap.xmax - overlap.xmin) * (overlap.ymax - overlap.ymin)

def to_bounding_box(rect: Rectangle):
    """Convert a Rectangle to an (x, y, width, height) bounding box."""
    return (rect.xmin, rect.ymin, rect.xmax - rect.xmin, rect.ymax - rect.ymin)

def from_bounding_box(bb):
    """Convert an (x, y, width, height) bounding box to a Rectangle."""
    return Rectangle(bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3])

View File

@@ -0,0 +1,5 @@
openvino==2022.3.0
# pillow for anything not intel linux, pillow-simd is available on x64 linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'

View File

@@ -0,0 +1,13 @@
{
"compilerOptions": {
"module": "commonjs",
"target": "ES2021",
"resolveJsonModule": true,
"moduleResolution": "Node16",
"esModuleInterop": true,
"sourceMap": true
},
"include": [
"src/**/*"
]
}

View File

@@ -59,8 +59,8 @@ type Prebuffers<T extends string> = {
[key in T]: PrebufferStreamChunk[];
}
type PrebufferParsers = 'mpegts' | 'mp4' | 'rtsp';
const PrebufferParserValues: PrebufferParsers[] = ['mpegts', 'mp4', 'rtsp'];
type PrebufferParsers = 'rtsp';
const PrebufferParserValues: PrebufferParsers[] = ['rtsp'];
function hasOddities(h264Info: H264Info) {
const h264Oddities = h264Info.fuab
@@ -79,8 +79,6 @@ class PrebufferSession {
parserSessionPromise: Promise<ParserSession<PrebufferParsers>>;
parserSession: ParserSession<PrebufferParsers>;
prebuffers: Prebuffers<PrebufferParsers> = {
mp4: [],
mpegts: [],
rtsp: [],
};
parsers: { [container: string]: StreamParser };
@@ -100,7 +98,6 @@ class PrebufferSession {
ffmpegInputArgumentsKey: string;
lastDetectedAudioCodecKey: string;
lastH264ProbeKey: string;
rebroadcastModeKey: string;
rtspParserKey: string;
rtspServerPath: string;
rtspServerMutedPath: string;
@@ -114,7 +111,6 @@ class PrebufferSession {
this.mixinDevice = mixin.mixinDevice;
this.audioConfigurationKey = 'audioConfiguration-' + this.streamId;
this.ffmpegInputArgumentsKey = 'ffmpegInputArguments-' + this.streamId;
this.rebroadcastModeKey = 'rebroadcastMode-' + this.streamId;
this.lastDetectedAudioCodecKey = 'lastDetectedAudioCodec-' + this.streamId;
this.lastH264ProbeKey = 'lastH264Probe-' + this.streamId;
this.rtspParserKey = 'rtspParser-' + this.streamId;
@@ -164,18 +160,7 @@ class PrebufferSession {
getDetectedIdrInterval() {
const durations: number[] = [];
if (this.prebuffers.mp4.length) {
let last: number;
for (const chunk of this.prebuffers.mp4) {
if (chunk.type === 'mdat') {
if (last)
durations.push(chunk.time - last);
last = chunk.time;
}
}
}
else if (this.prebuffers.rtsp.length) {
if (this.prebuffers.rtsp.length) {
let last: number;
for (const chunk of this.prebuffers.rtsp) {
@@ -259,7 +244,7 @@ class PrebufferSession {
return mediaStreamOptions?.container?.startsWith('rtsp');
}
getParser(rtspMode: boolean, mediaStreamOptions: MediaStreamOptions) {
getParser(mediaStreamOptions: MediaStreamOptions) {
let parser: string;
const rtspParser = this.storage.getItem(this.rtspParserKey);
@@ -267,25 +252,17 @@ class PrebufferSession {
parser = STRING_DEFAULT;
}
else {
if (rtspParser === FFMPEG_PARSER_TCP)
parser = FFMPEG_PARSER_TCP;
if (rtspParser === FFMPEG_PARSER_UDP)
parser = FFMPEG_PARSER_UDP;
// scrypted parser can only be used in rtsp mode.
if (rtspMode && !parser) {
if (!rtspParser || rtspParser === STRING_DEFAULT)
switch (rtspParser) {
case FFMPEG_PARSER_TCP:
case FFMPEG_PARSER_UDP:
case SCRYPTED_PARSER_TCP:
case SCRYPTED_PARSER_UDP:
parser = rtspParser;
break;
default:
parser = SCRYPTED_PARSER_TCP;
if (rtspParser === SCRYPTED_PARSER_TCP)
parser = SCRYPTED_PARSER_TCP;
if (rtspParser === SCRYPTED_PARSER_UDP)
parser = SCRYPTED_PARSER_UDP;
break;
}
// bad config, fall back to ffmpeg tcp parsing.
if (!parser)
parser = FFMPEG_PARSER_TCP;
}
return {
@@ -294,18 +271,6 @@ class PrebufferSession {
}
}
getRebroadcastContainer() {
let mode = this.storage.getItem(this.rebroadcastModeKey) || 'Default';
if (mode === 'Default')
mode = 'RTSP';
const rtspMode = mode?.startsWith('RTSP');
return {
rtspMode: mode?.startsWith('RTSP'),
muxingMp4: !rtspMode,
};
}
async getMixinSettings(): Promise<Setting[]> {
const settings: Setting[] = [];
@@ -313,8 +278,7 @@ class PrebufferSession {
let total = 0;
let start = 0;
const { muxingMp4, rtspMode } = this.getRebroadcastContainer();
for (const prebuffer of (muxingMp4 ? this.prebuffers.mp4 : this.prebuffers.rtsp)) {
for (const prebuffer of this.prebuffers.rtsp) {
start = start || prebuffer.time;
for (const chunk of prebuffer.chunks) {
total += chunk.byteLength;
@@ -326,23 +290,6 @@ class PrebufferSession {
const group = "Streams";
const subgroup = `Stream: ${this.streamName}`;
settings.push(
{
title: 'Rebroadcast Container',
group,
subgroup,
description: `The container format to use when rebroadcasting. The default mode for this camera is RTSP.`,
placeholder: 'RTSP',
choices: [
STRING_DEFAULT,
'MPEG-TS',
'RTSP',
],
key: this.rebroadcastModeKey,
value: this.storage.getItem(this.rebroadcastModeKey) || STRING_DEFAULT,
}
);
const addFFmpegAudioSettings = () => {
settings.push(
{
@@ -383,19 +330,18 @@ class PrebufferSession {
)
}
let usingFFmpeg = muxingMp4;
let usingFFmpeg = false;
if (this.canUseRtspParser(this.advertisedMediaStreamOptions)) {
const canUseScryptedParser = rtspMode;
const defaultValue = canUseScryptedParser && !this.getLastH264Oddities() ?
SCRYPTED_PARSER_TCP : FFMPEG_PARSER_TCP;
const parser = this.getParser(this.advertisedMediaStreamOptions);
const defaultValue = parser.parser;
const scryptedOptions = canUseScryptedParser ? [
const scryptedOptions = [
SCRYPTED_PARSER_TCP,
SCRYPTED_PARSER_UDP,
] : [];
];
const currentParser = this.storage.getItem(this.rtspParserKey) || STRING_DEFAULT;
const currentParser = parser.isDefault ? STRING_DEFAULT : parser.parser;
settings.push(
{
@@ -414,14 +360,9 @@ class PrebufferSession {
}
);
if (!(currentParser === STRING_DEFAULT ? defaultValue : currentParser).includes('Scrypted')) {
usingFFmpeg = true;
}
usingFFmpeg = !parser.parser.includes('Scrypted');
}
if (muxingMp4) {
addFFmpegAudioSettings();
}
if (usingFFmpeg) {
addFFmpegInputSettings();
}
@@ -492,26 +433,24 @@ class PrebufferSession {
addOddities();
}
if (rtspMode) {
settings.push({
group,
subgroup,
key: 'rtspRebroadcastUrl',
title: 'RTSP Rebroadcast Url',
description: 'The RTSP URL of the rebroadcast stream. Substitute localhost as appropriate.',
readonly: true,
value: `rtsp://localhost:${this.mixin.streamSettings.storageSettings.values.rebroadcastPort}/${this.rtspServerPath}`,
});
settings.push({
group,
subgroup,
key: 'rtspRebroadcastMutedUrl',
title: 'RTSP Rebroadcast Url (Muted)',
description: 'The RTSP URL of the muted rebroadcast stream. Substitute localhost as appropriate.',
readonly: true,
value: `rtsp://localhost:${this.mixin.streamSettings.storageSettings.values.rebroadcastPort}/${this.rtspServerMutedPath}`,
});
}
settings.push({
group,
subgroup,
key: 'rtspRebroadcastUrl',
title: 'RTSP Rebroadcast Url',
description: 'The RTSP URL of the rebroadcast stream. Substitute localhost as appropriate.',
readonly: true,
value: `rtsp://localhost:${this.mixin.streamSettings.storageSettings.values.rebroadcastPort}/${this.rtspServerPath}`,
});
settings.push({
group,
subgroup,
key: 'rtspRebroadcastMutedUrl',
title: 'RTSP Rebroadcast Url (Muted)',
description: 'The RTSP URL of the muted rebroadcast stream. Substitute localhost as appropriate.',
readonly: true,
value: `rtsp://localhost:${this.mixin.streamSettings.storageSettings.values.rebroadcastPort}/${this.rtspServerMutedPath}`,
});
return settings;
}
@@ -536,24 +475,10 @@ class PrebufferSession {
const { isUsingDefaultAudioConfig, aacAudio, compatibleAudio, reencodeAudio } = this.getAudioConfig();
const { rtspMode, muxingMp4 } = this.getRebroadcastContainer();
let detectedAudioCodec = this.storage.getItem(this.lastDetectedAudioCodecKey) || undefined;
if (detectedAudioCodec === 'null')
detectedAudioCodec = null;
// only need to probe the audio under specific circumstances.
// rtsp only mode (ie, no mp4 mux) does not need probing.
let probingAudioCodec = false;
if (muxingMp4
&& !audioSoftMuted
&& !advertisedAudioCodec
&& isUsingDefaultAudioConfig
&& detectedAudioCodec === undefined) {
this.console.warn('Camera did not report an audio codec, muting the audio stream and probing the codec.');
probingAudioCodec = true;
}
// the assumed audio codec is the detected codec first and the reported codec otherwise.
const assumedAudioCodec = detectedAudioCodec === undefined
? advertisedAudioCodec?.toLowerCase()
@@ -563,24 +488,7 @@ class PrebufferSession {
// after probing the audio codec is complete, alert the user with appropriate instructions.
// assume the codec is user configurable unless the camera explictly reports otherwise.
const audioIncompatible = !COMPATIBLE_AUDIO_CODECS.includes(assumedAudioCodec);
if (muxingMp4 && !probingAudioCodec && mso?.userConfigurable !== false && !audioSoftMuted) {
if (audioIncompatible) {
// show an alert that rebroadcast needs an explicit setting by the user.
if (isUsingDefaultAudioConfig) {
log.a(`${this.mixin.name} is using the ${assumedAudioCodec} audio codec. Configuring your Camera to use Opus, PCM, or AAC audio is recommended. If this is not possible, Select 'Transcode Audio' in the camera stream's Rebroadcast settings to suppress this alert.`);
}
this.console.warn('Configure your camera to output Opus, PCM, or AAC audio. Suboptimal audio codec in use:', assumedAudioCodec);
}
else if (!audioSoftMuted && isUsingDefaultAudioConfig && advertisedAudioCodec === undefined && detectedAudioCodec !== undefined) {
// handling compatible codecs that were unspecified...
// if (detectedAudioCodec === 'aac') {
// log.a(`${this.mixin.name} did not report a codec and ${detectedAudioCodec} was found during probe. Select '${AAC_AUDIO}' in the camera stream's Rebroadcast settings to suppress this alert and improve startup time.`);
// }
// else {
// log.a(`${this.mixin.name} did not report a codec and ${detectedAudioCodec} was found during probe. Select '${COMPATIBLE_AUDIO}' in the camera stream's Rebroadcast settings to suppress this alert and improve startup time.`);
// }
}
}
// aac needs to have the adts header stripped for mpegts and mp4.
// use this filter sparingly as it prevents ffmpeg from starting on a mismatch.
@@ -599,15 +507,9 @@ class PrebufferSession {
// enable transcoding by default. however, still allow the user to change the settings
// in case something changed.
let mustTranscode = false;
if (muxingMp4 && !probingAudioCodec && isUsingDefaultAudioConfig && audioIncompatible) {
if (mso?.userConfigurable === false)
this.console.log('camera reports it is not user configurable. transcoding due to incompatible codec', assumedAudioCodec);
else
this.console.log('camera audio transcoding due to incompatible codec. configure the camera to use a compatible codec if possible.');
mustTranscode = true;
}
if (audioSoftMuted || probingAudioCodec) {
if (audioSoftMuted) {
// no audio? explicitly disable it.
acodec = ['-an'];
this.audioDisabled = true;
@@ -680,29 +582,14 @@ class PrebufferSession {
};
this.parsers = rbo.parsers;
this.console.log('rebroadcast mode:', rtspMode ? 'rtsp' : 'mpegts');
if (!rtspMode) {
rbo.parsers.mpegts = createMpegTsParser({
vcodec,
acodec,
});
}
else {
const parser = createRtspParser({
vcodec,
// the rtsp parser should always stream copy unless audio is soft muted.
acodec: audioSoftMuted ? acodec : ['-acodec', 'copy'],
});
this.sdp = parser.sdp;
rbo.parsers.rtsp = parser;
}
if (muxingMp4) {
rbo.parsers.mp4 = createFragmentedMp4Parser({
vcodec,
acodec,
});
}
const parser = createRtspParser({
vcodec,
// the rtsp parser should always stream copy unless audio is soft muted.
acodec: audioSoftMuted ? acodec : ['-acodec', 'copy'],
});
this.sdp = parser.sdp;
rbo.parsers.rtsp = parser;
const mo = await this.mixinDevice.getVideoStream(mso);
const isRfc4571 = mo.mimeType === 'x-scrypted/x-rfc4571';
@@ -717,7 +604,7 @@ class PrebufferSession {
const h264Oddities = this.getLastH264Oddities();
if (rtspMode && isRfc4571) {
if (isRfc4571) {
this.usingScryptedParser = true;
this.console.log('bypassing ffmpeg: using scrypted rfc4571 parser')
const json = await mediaManager.convertMediaObjectToJSON<any>(mo, 'x-scrypted/x-rfc4571');
@@ -731,7 +618,7 @@ class PrebufferSession {
const ffmpegInput = JSON.parse(moBuffer.toString()) as FFmpegInput;
sessionMso = ffmpegInput.mediaStreamOptions || this.advertisedMediaStreamOptions;
let { parser, isDefault } = this.getParser(rtspMode, sessionMso);
let { parser, isDefault } = this.getParser(sessionMso);
this.usingScryptedParser = parser === SCRYPTED_PARSER_TCP || parser === SCRYPTED_PARSER_UDP;
this.usingScryptedUdpParser = parser === SCRYPTED_PARSER_UDP;
@@ -788,7 +675,7 @@ class PrebufferSession {
const oddity = hasOddities(h264Probe);
if (oddity && !reportedOddity) {
reportedOddity = true;
let { isDefault } = this.getParser(rtspMode, sessionMso);
let { isDefault } = this.getParser(sessionMso);
this.console.warn('H264 oddity detected.');
if (!isDefault) {
this.console.warn('If there are issues streaming, consider using the Default parser.');
@@ -854,12 +741,6 @@ class PrebufferSession {
this.console.error(`Video codec is not h264. If there are errors, try changing your camera's encoder output.`);
}
if (probingAudioCodec) {
this.console.warn('Audio probe complete, ending rebroadcast session and restarting with detected codecs.');
session.kill(new Error('audio probe completed, restarting'));
return this.startPrebufferSession();
}
this.parserSession = session;
session.killed.finally(() => {
if (this.parserSession === session)
@@ -953,7 +834,7 @@ class PrebufferSession {
handleChargingBatteryEvents() {
if (!this.mixin.interfaces.includes(ScryptedInterface.Charger) ||
!this.mixin.interfaces.includes(ScryptedInterface.Battery)) {
!this.mixin.interfaces.includes(ScryptedInterface.Battery)) {
return;
}
@@ -1119,11 +1000,6 @@ class PrebufferSession {
requestedPrebuffer = Math.min(defaultPrebuffer, this.getDetectedIdrInterval() || defaultPrebuffer);;
}
const { rtspMode, muxingMp4 } = this.getRebroadcastContainer();
const defaultContainer = rtspMode ? 'rtsp' : 'mpegts';
let container: PrebufferParsers = this.parsers[options?.container] ? options?.container as PrebufferParsers : defaultContainer;
const mediaStreamOptions: ResponseMediaStreamOptions = session.negotiateMediaStream(options);
let sdp = await this.sdp;
if (!mediaStreamOptions.video?.h264Info && this.usingScryptedParser) {
@@ -1139,100 +1015,94 @@ class PrebufferSession {
const interleavedMap = new Map<string, number>();
const serverPortMap = new Map<string, RtspTrack>();
let server: FileRtspServer;
const parsedSdp = parseSdp(sdp);
const videoSection = parsedSdp.msections.find(msection => msection.codec && msection.codec === mediaStreamOptions.video?.codec) || parsedSdp.msections.find(msection => msection.type === 'video');
let audioSection = parsedSdp.msections.find(msection => msection.codec && msection.codec === mediaStreamOptions.audio?.codec) || parsedSdp.msections.find(msection => msection.type === 'audio');
if (mediaStreamOptions.audio === null)
audioSection = undefined;
parsedSdp.msections = parsedSdp.msections.filter(msection => msection === videoSection || msection === audioSection);
const filterPrebufferAudio = options?.prebuffer === undefined;
const videoCodec = parsedSdp.msections.find(msection => msection.type === 'video')?.codec;
sdp = parsedSdp.toSdp();
filter = (chunk, prebuffer) => {
// if no prebuffer is explicitly requested, don't send prebuffer audio
if (prebuffer && filterPrebufferAudio && chunk.type !== videoCodec)
return;
if (container === 'rtsp') {
const parsedSdp = parseSdp(sdp);
const videoSection = parsedSdp.msections.find(msection => msection.codec && msection.codec === mediaStreamOptions.video?.codec) || parsedSdp.msections.find(msection => msection.type === 'video');
let audioSection = parsedSdp.msections.find(msection => msection.codec && msection.codec === mediaStreamOptions.audio?.codec) || parsedSdp.msections.find(msection => msection.type === 'audio');
if (mediaStreamOptions.audio === null)
audioSection = undefined;
parsedSdp.msections = parsedSdp.msections.filter(msection => msection === videoSection || msection === audioSection);
const filterPrebufferAudio = options?.prebuffer === undefined;
const videoCodec = parsedSdp.msections.find(msection => msection.type === 'video')?.codec;
sdp = parsedSdp.toSdp();
filter = (chunk, prebuffer) => {
// if no prebuffer is explicitly requested, don't send prebuffer audio
if (prebuffer && filterPrebufferAudio && chunk.type !== videoCodec)
return;
const channel = interleavedMap.get(chunk.type);
if (!interleavePassthrough) {
if (channel == undefined) {
const udp = serverPortMap.get(chunk.type);
if (udp)
server.sendTrack(udp.control, chunk.chunks[1], chunk.type.startsWith('rtcp-'));
return;
}
const chunks = chunk.chunks.slice();
const header = Buffer.from(chunks[0]);
header.writeUInt8(channel, 1);
chunks[0] = header;
chunk = {
startStream: chunk.startStream,
chunks,
}
}
else if (channel === undefined) {
const channel = interleavedMap.get(chunk.type);
if (!interleavePassthrough) {
if (channel == undefined) {
const udp = serverPortMap.get(chunk.type);
if (udp)
server.sendTrack(udp.control, chunk.chunks[1], chunk.type.startsWith('rtcp-'));
return;
}
if (server.writeStream) {
server.writeRtpPayload(chunk.chunks[0], chunk.chunks[1]);
return;
const chunks = chunk.chunks.slice();
const header = Buffer.from(chunks[0]);
header.writeUInt8(channel, 1);
chunks[0] = header;
chunk = {
startStream: chunk.startStream,
chunks,
}
return chunk;
}
else if (channel === undefined) {
return;
}
const hostname = options?.route === 'internal' ? undefined : '0.0.0.0';
const clientPromise = await listenSingleRtspClient({
hostname,
createServer: duplex => {
sdp = addTrackControls(sdp);
server = new FileRtspServer(duplex, sdp);
server.writeConsole = this.console;
return server;
}
});
socketPromise = clientPromise.rtspServerPromise.then(async server => {
if (session.parserSpecific) {
const parserSpecific = session.parserSpecific as RtspSessionParserSpecific;
server.resolveInterleaved = msection => {
const channel = parserSpecific.interleaved.get(msection.codec);
return [channel, channel + 1];
}
}
// server.console = this.console;
await server.handlePlayback();
server.handleTeardown().catch(() => {}).finally(() => server.client.destroy());
for (const track of Object.values(server.setupTracks)) {
if (track.protocol === 'udp') {
serverPortMap.set(track.codec, track);
serverPortMap.set(`rtcp-${track.codec}`, track);
continue;
}
interleavedMap.set(track.codec, track.destination);
interleavedMap.set(`rtcp-${track.codec}`, track.destination + 1);
}
interleavePassthrough = session.parserSpecific && serverPortMap.size === 0;
return server.client;
})
url = clientPromise.url;
if (hostname) {
urls = await getUrlLocalAdresses(this.console, url);
if (server.writeStream) {
server.writeRtpPayload(chunk.chunks[0], chunk.chunks[1]);
return;
}
return chunk;
}
else {
const client = await listenZeroSingleClient();
socketPromise = client.clientPromise;
url = client.url;
const hostname = options?.route === 'internal' ? undefined : '0.0.0.0';
const clientPromise = await listenSingleRtspClient({
hostname,
createServer: duplex => {
sdp = addTrackControls(sdp);
server = new FileRtspServer(duplex, sdp);
server.writeConsole = this.console;
return server;
}
});
socketPromise = clientPromise.rtspServerPromise.then(async server => {
if (session.parserSpecific) {
const parserSpecific = session.parserSpecific as RtspSessionParserSpecific;
server.resolveInterleaved = msection => {
const channel = parserSpecific.interleaved.get(msection.codec);
return [channel, channel + 1];
}
}
// server.console = this.console;
await server.handlePlayback();
server.handleTeardown().catch(() => { }).finally(() => server.client.destroy());
for (const track of Object.values(server.setupTracks)) {
if (track.protocol === 'udp') {
serverPortMap.set(track.codec, track);
serverPortMap.set(`rtcp-${track.codec}`, track);
continue;
}
interleavedMap.set(track.codec, track.destination);
interleavedMap.set(`rtcp-${track.codec}`, track.destination + 1);
}
interleavePassthrough = session.parserSpecific && serverPortMap.size === 0;
return server.client;
})
url = clientPromise.url;
if (hostname) {
urls = await getUrlLocalAdresses(this.console, url);
}
const container = 'rtsp';
mediaStreamOptions.sdp = sdp;
const isActiveClient = options?.refresh !== false;
@@ -1254,7 +1124,7 @@ class PrebufferSession {
if (this.audioDisabled) {
mediaStreamOptions.audio = null;
}
else if (reencodeAudio && muxingMp4) {
else if (reencodeAudio) {
mediaStreamOptions.audio = {
codec: 'aac',
encoder: 'aac',

View File

@@ -121,7 +121,7 @@ export function startRFC4571Parser(console: Console, socket: Readable, sdp: stri
console.log('parsed sdp sps', parsedSps);
}
catch (e) {
console.warn('sdp sps parsing failed');
console.warn('sdp sps parsing failed', e);
}
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/python-codecs",
"version": "0.1.46",
"version": "0.1.47",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/python-codecs",
"version": "0.1.46",
"version": "0.1.47",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/python-codecs",
"version": "0.1.46",
"version": "0.1.47",
"description": "Python Codecs for Scrypted",
"keywords": [
"scrypted",

View File

@@ -35,7 +35,16 @@ class PILImage(scrypted_sdk.VideoFrame):
finally:
rgb.close()
return await to_thread(format)
# TODO: gray...
elif options['format'] == 'gray':
def format():
if pilImage.pilImage.mode == 'L':
return pilImage.pilImage.tobytes()
l = pilImage.pilImage.convert('L')
try:
return l.tobytes()
finally:
l.close()
return await to_thread(format)
def save():
bytesArray = io.BytesIO()

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/server",
"version": "0.7.88",
"version": "0.7.92",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/server",
"version": "0.7.88",
"version": "0.7.92",
"license": "ISC",
"dependencies": {
"@mapbox/node-pre-gyp": "^1.0.10",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/server",
"version": "0.7.89",
"version": "0.7.92",
"description": "",
"dependencies": {
"@mapbox/node-pre-gyp": "^1.0.10",
@@ -69,8 +69,8 @@
"prebuild": "rimraf dist",
"build": "tsc --outDir dist",
"postbuild": "node test/check-build-output.js",
"prebeta": "npm version patch && git add package.json && npm run build && git commit -m prebeta",
"beta": "npm publish --tag beta",
"postbeta": "npm version patch && git add package.json && npm run build && git commit -m postbeta",
"release": "npm publish",
"prepublish": "npm run build",
"postrelease": "git tag v$npm_package_version && git push origin v$npm_package_version && npm version patch && git add package.json && git commit -m postrelease",

View File

@@ -25,6 +25,8 @@ export class AddressSettings {
const nif = networkInterfaces[addressOrInterface];
if (!raw && nif) {
for (const addr of nif) {
if (!addr.address || addr.address.startsWith('169.254.') || addr.address.toLowerCase().startsWith('fe80:'))
continue;
ret.push(addr.address);
}
}