Compare commits


69 Commits

Author SHA1 Message Date
Koushik Dutta
fa86c31340 prerelease 2023-03-29 12:41:56 -07:00
Koushik Dutta
94ded75d40 docker: fix watchtower token 2023-03-29 12:17:05 -07:00
Koushik Dutta
887b61cd7a prebeta 2023-03-29 11:58:54 -07:00
Koushik Dutta
48e3d30987 server: output docker flavor to logs 2023-03-29 11:58:43 -07:00
Koushik Dutta
02dba3cd71 docker: include flavor in env variable 2023-03-29 11:57:11 -07:00
Koushik Dutta
195769034d docker: include flavor in env variable 2023-03-29 11:56:50 -07:00
Koushik Dutta
39c08aa378 prebeta 2023-03-29 10:19:18 -07:00
Koushik Dutta
fa8056d38e python: purge packages on update 2023-03-29 10:18:34 -07:00
Koushik Dutta
145f116c68 webrtc/h264: reset stapa sent flag after every idr frame 2023-03-29 09:37:41 -07:00
Koushik Dutta
15b6f336e4 common: add h264 fragment information parsing 2023-03-29 08:18:13 -07:00
Koushik Dutta
8b46f0a466 opencv: use new pipeline 2023-03-29 08:17:52 -07:00
Koushik Dutta
a20cc5cd89 docker: always install packages for arm 2023-03-29 08:01:08 -07:00
Koushik Dutta
3d068929fd predict: publish 2023-03-28 19:40:14 -07:00
Koushik Dutta
928f9b7579 prerelease 2023-03-28 19:36:48 -07:00
Koushik Dutta
c1c5a42645 server: fixup versioned prefix/node_modules path 2023-03-28 19:36:39 -07:00
Koushik Dutta
12643cdde2 Merge branch 'main' of github.com:koush/scrypted 2023-03-28 19:27:26 -07:00
Koushik Dutta
0bff96a6e6 python-codecs: pil crop is not thread safe https://github.com/python-pillow/Pillow/issues/4848 2023-03-28 19:27:22 -07:00
TA2k
4e7e67de54 Enable ipv6 for avahi (#670)
Enable ipv6 for avahi to allow multiple mdns servers on one host
2023-03-28 13:32:14 -07:00
Koushik Dutta
65c4a30004 rebroadcast: use regular file open flags for truncate 2023-03-28 12:45:52 -07:00
Koushik Dutta
309a1dc11f rebroadcast: add truncation error logging 2023-03-28 12:43:07 -07:00
Koushik Dutta
b7904b73b2 Merge branch 'main' of github.com:koush/scrypted 2023-03-28 12:20:14 -07:00
Koushik Dutta
9e9ddbc5f3 rebroadcast: catch various unhandled errors 2023-03-28 12:20:07 -07:00
Koushik Dutta
ceda54f91b rebroadcast: support recording truncation 2023-03-28 12:19:38 -07:00
Koushik Dutta
1d4052b839 common: simplify some socket utils 2023-03-28 10:26:48 -07:00
Koushik Dutta
6a5d6e6617 predict: cleanups 2023-03-28 10:26:43 -07:00
Koushik Dutta
f55cc6066f common: simplify some socket utils 2023-03-28 10:25:50 -07:00
Brett Jia
527714e434 arlo: camera [spot,flood]lights, sirens + only use interfaces when hardware supports it (#660)
* only create vss and siren for supported basestation models

* VideoClips only if camera has cloud recording + start of Cameras as DeviceProviders

* make verbose logging a boolean toggle

* camera spotlights and floodlights

* tweak video clip delete warning

* bump 0.7.5 for beta

* bump 0.7.6 for release + pin deps

* expose sirens on supported cameras

* bump 0.7.7 for release
2023-03-27 16:43:23 -07:00
Koushik Dutta
8a1633ffa3 tensorflow: reduce dependencies for new pipeline 2023-03-27 12:23:44 -07:00
Koushik Dutta
56b2ab9c4f prerelease 2023-03-27 11:53:24 -07:00
Koushik Dutta
d330e2eb9d server: remove os machine usage which only exists in recent node builds 2023-03-27 11:53:19 -07:00
Koushik Dutta
b55e7cacb3 predict: remove old pipeline code 2023-03-27 11:14:53 -07:00
Koushik Dutta
c70375db06 prerelease 2023-03-27 09:37:39 -07:00
Koushik Dutta
2c23021d40 server: catch/print startup errors to console and not just events tab 2023-03-27 09:37:29 -07:00
Koushik Dutta
84a4ef4539 mac: reorder unpin 2023-03-27 09:02:37 -07:00
Koushik Dutta
7f3db0549b python-codecs: update requirements.txt 2023-03-27 08:52:20 -07:00
Koushik Dutta
de0e1784a3 amcrest: fix camera default name 2023-03-27 08:50:01 -07:00
Koushik Dutta
5a8798638e homekit: do not start two way audio if only an rtcp packet is received 2023-03-27 08:48:40 -07:00
Koushik Dutta
14da49728c videoanalysis: remove old pipeline 2023-03-26 23:28:52 -07:00
Koushik Dutta
55423b2d09 videoanalysis: yuv/gray extraction fixes 2023-03-26 23:03:08 -07:00
Koushik Dutta
596106247b python-codecs: fix libav and pil issues 2023-03-26 22:43:13 -07:00
Koushik Dutta
5472d90368 opencv: beta 2023-03-26 19:21:22 -07:00
Koushik Dutta
fcf58413fc prebeta 2023-03-26 12:25:30 -07:00
Koushik Dutta
0d03b91753 server: add query tokens to env auth 2023-03-26 12:25:23 -07:00
Koushik Dutta
2fd088e4d6 prebeta 2023-03-26 12:09:21 -07:00
Koushik Dutta
c6933198b2 server: autocreate admin if specified by env 2023-03-26 12:09:15 -07:00
Koushik Dutta
210e684a22 docker: fix watchtower scope https://github.com/koush/scrypted/issues/662 2023-03-26 11:38:38 -07:00
Koushik Dutta
53cc4b6ef3 python-codecs: fix older version of pil 2023-03-26 11:36:09 -07:00
Koushik Dutta
d58d138a68 mac: trim deps, unpin hacked up gst libs 2023-03-25 22:03:14 -07:00
Koushik Dutta
c0199a2b76 mac: remove gstreamer hack from install script 2023-03-25 21:55:57 -07:00
Koushik Dutta
badb1905ce prerelease 2023-03-25 21:54:40 -07:00
Koushik Dutta
735c2dce7b Merge branch 'main' of github.com:koush/scrypted 2023-03-25 21:52:56 -07:00
Koushik Dutta
ffae3f246f python-codecs: fix mac crash 2023-03-25 21:52:51 -07:00
Koushik Dutta
31b424f89f server: mac python fixes 2023-03-25 21:52:32 -07:00
Brett Jia
3b7acc3a90 homekit: merge child lights into cameras (#659) 2023-03-25 20:09:42 -07:00
Koushik Dutta
7e66d1ac7f prebeta 2023-03-25 19:45:11 -07:00
Koushik Dutta
a613da069e server: relax failure on python arch mismatch 2023-03-25 19:45:05 -07:00
Koushik Dutta
40b73c6589 prebeta 2023-03-25 18:42:52 -07:00
Koushik Dutta
ef16ca83a2 server: detect python architecture vs machine mismatch 2023-03-25 18:42:39 -07:00
Koushik Dutta
76bf1d0d3f docker: rollback linux changes 2023-03-25 18:35:40 -07:00
Koushik Dutta
3d5ccf25d1 server: log host os specs 2023-03-25 15:05:08 -07:00
Koushik Dutta
36fcb713d9 videoanalysis: ffmpeg frame generator fixes 2023-03-25 15:04:40 -07:00
Koushik Dutta
e306631850 docker: arm fixes 2023-03-25 14:40:37 -07:00
Koushik Dutta
17400fa886 docker: arm fixes 2023-03-25 14:37:17 -07:00
Koushik Dutta
c6dc628616 docker: arm fixes 2023-03-25 14:31:40 -07:00
Koushik Dutta
f974653e73 videoanalysis: make new pipeline the default 2023-03-25 12:05:35 -07:00
Koushik Dutta
b83880a8a3 Merge branch 'main' of github.com:koush/scrypted 2023-03-25 11:34:37 -07:00
Koushik Dutta
ee4d8f52df pam-diff: fixup score reporting 2023-03-25 11:34:33 -07:00
Brett Jia
3854b75c6e arlo: video clips + virtual security system for sirens (#656)
* fix doorbell device type

* bump 0.7.1 for beta

* standalone camera fixes

* bump 0.7.2 for beta

* more type annotations + trickle discover all devices

* fetch arlo library clips

* log options

* cache library at lower level and fetch clips on demand

* move library timedelta range lower in stack

* wip siren as security system

* virtual security system and tweaks

* vss documentation and settings

* expand vss usage docs

* more docs changes

* force homekit and scrypted to update given vss and siren state

* RE-ENABLING SIREN!!!

* bump 0.7.3 for beta

* bump 0.7.3 for release
2023-03-25 11:13:28 -07:00
Koushik Dutta
07c3173506 docker: fix pip execution command 2023-03-25 10:43:12 -07:00
90 changed files with 1256 additions and 2071 deletions

View File

@@ -361,8 +361,7 @@ export interface RebroadcasterOptions {
},
}
export async function handleRebroadcasterClient(duplex: Promise<Duplex> | Duplex, options?: RebroadcasterOptions) {
const socket = await duplex;
export function handleRebroadcasterClient(socket: Duplex, options?: RebroadcasterOptions) {
const firstWriteData = (data: StreamChunk) => {
if (data.startStream) {
socket.write(data.startStream)

View File

@@ -62,4 +62,4 @@ export async function bind(server: dgram.Socket, port: number) {
}
}
export { listenZero, listenZeroSingleClient } from "@scrypted/server/src/listen-zero";
export { listenZero, listenZeroSingleClient, ListenZeroSingleClientTimeoutError } from "@scrypted/server/src/listen-zero";

View File

@@ -129,6 +129,16 @@ export function getNaluTypes(streamChunk: StreamChunk) {
return getNaluTypesInNalu(streamChunk.chunks[streamChunk.chunks.length - 1].subarray(12))
}
export function getNaluFragmentInformation(nalu: Buffer) {
const naluType = nalu[0] & 0x1f;
const fua = naluType === H264_NAL_TYPE_FU_A;
return {
fua,
fuaStart: fua && !!(nalu[1] & 0x80),
fuaEnd: fua && !!(nalu[1] & 0x40),
}
}
export function getNaluTypesInNalu(nalu: Buffer, fuaRequireStart = false, fuaRequireEnd = false) {
const ret = new Set<number>();
const naluType = nalu[0] & 0x1f;
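
The new getNaluFragmentInformation above reads the FU-A start/end bits from the RTP FU header (RFC 6184). A minimal Python sketch of the same bit layout, with an illustrative byte pair rather than one captured from a real stream:

# Sketch of the FU-A bit layout parsed by getNaluFragmentInformation above.
H264_NAL_TYPE_FU_A = 28  # RFC 6184 fragmentation unit A

def nalu_fragment_information(nalu: bytes) -> dict:
    nalu_type = nalu[0] & 0x1F          # low 5 bits of the FU indicator
    fua = nalu_type == H264_NAL_TYPE_FU_A
    return {
        "fua": fua,
        # FU header: S (start) bit is 0x80, E (end) bit is 0x40
        "fuaStart": fua and bool(nalu[1] & 0x80),
        "fuaEnd": fua and bool(nalu[1] & 0x40),
    }

# A FU-A start fragment carrying an IDR slice (type 5):
print(nalu_fragment_information(bytes([0x7C, 0x85])))
# {'fua': True, 'fuaStart': True, 'fuaEnd': False}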

View File

@@ -59,7 +59,11 @@ RUN apt-get -y install \
# armv7l does not have wheels for any of these
# and compile times would take forever, if it works at all.
RUN if [ "$(uname -m)" = "armv7l" ]; \
# furthermore, it's possible to run 32bit docker on 64bit arm,
# which causes weird behavior in python: it reads the arch from the
# kernel, which still reports 64bit even when running in 32bit docker.
# this scenario is not supported and will be reported at runtime.
RUN if [ "$(uname -m)" != "x86_64" ]; \
then \
apt-get -y install \
python3-matplotlib \
@@ -73,7 +77,7 @@ RUN if [ "$(uname -m)" = "armv7l" ]; \
RUN python3 -m pip install --upgrade pip
# pyvips is broken on x86 due to mismatched ffi
# https://stackoverflow.com/questions/62658237/it-seems-that-the-version-of-the-libffi-library-seen-at-runtime-is-different-fro
RUN pip install --force-reinstall --no-binary :all: cffi
RUN python3 -m pip install --force-reinstall --no-binary :all: cffi
RUN python3 -m pip install aiofiles debugpy typing_extensions psutil
################################################################
@@ -91,7 +95,8 @@ ENV SCRYPTED_INSTALL_PATH="/server"
# changing this forces pip and npm to perform reinstalls.
# if this base image changes, this version must be updated.
ENV SCRYPTED_BASE_VERSION=20230322
ENV SCRYPTED_BASE_VERSION=20230329
ENV SCRYPTED_DOCKER_FLAVOR=full
################################################################
# End section generated from template/Dockerfile.full.footer
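
On the 32bit-docker-on-64bit-arm scenario the Dockerfile comment above describes: uname -m and Python's platform.machine() report the kernel architecture, so a 32-bit container can still see aarch64. A hedged sketch of one way such a mismatch can be detected at runtime — an illustration, not necessarily the check the server performs:

import platform
import struct

# platform.machine() reflects the kernel, so a 32-bit container on a
# 64-bit ARM host can still report "aarch64". The pointer size of the
# running interpreter reveals the actual userspace width.
kernel_arch = platform.machine()
pointer_bits = struct.calcsize("P") * 8

if kernel_arch in ("aarch64", "arm64") and pointer_bits == 32:
    print(f"32-bit python on a {kernel_arch} kernel; unsupported setup")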

View File

@@ -42,4 +42,5 @@ ENV SCRYPTED_INSTALL_PATH="/server"
# changing this forces pip and npm to perform reinstalls.
# if this base image changes, this version must be updated.
ENV SCRYPTED_BASE_VERSION=20230322
ENV SCRYPTED_BASE_VERSION=20230329
ENV SCRYPTED_DOCKER_FLAVOR=lite

View File

@@ -21,4 +21,5 @@ ENV SCRYPTED_INSTALL_PATH="/server"
# changing this forces pip and npm to perform reinstalls.
# if this base image changes, this version must be updated.
ENV SCRYPTED_BASE_VERSION=20230322
ENV SCRYPTED_BASE_VERSION=20230329
ENV SCRYPTED_DOCKER_FLAVOR=thin

View File

@@ -90,4 +90,4 @@ services:
# Must match the port in the auto update url above.
- 10444:8080
# check for updates once an hour (interval is in seconds)
command: --interval 3600 --cleanup
command: --interval 3600 --cleanup --scope scrypted

View File

@@ -1,7 +1,7 @@
[server]
#host-name=
use-ipv4=yes
use-ipv6=no
use-ipv6=yes
enable-dbus=yes
ratelimit-interval-usec=1000000
ratelimit-burst=1000
@@ -14,4 +14,4 @@ rlimit-core=0
rlimit-data=4194304
rlimit-fsize=0
rlimit-nofile=768
rlimit-stack=4194304
rlimit-stack=4194304

View File

@@ -44,51 +44,25 @@ RUN_IGNORE brew install node@18
RUN brew install libvips
# dlib
RUN brew install cmake
# gstreamer plugins
RUN_IGNORE brew install gstreamer gst-plugins-base gst-plugins-good gst-plugins-bad gst-plugins-ugly
# gst python bindings
RUN_IGNORE brew install gst-python
# python image library
# todo: consider removing this
RUN_IGNORE brew install pillow
### HACK WORKAROUND
### https://github.com/koush/scrypted/issues/544
brew unpin gstreamer
brew unpin gst-python
brew unpin gst-plugins-ugly
brew unpin gst-plugins-good
brew unpin gst-plugins-base
brew unpin gst-plugins-good
brew unpin gst-plugins-bad
brew unpin gst-plugins-ugly
brew unpin gst-libav
brew unlink gstreamer
brew unlink gst-python
brew unlink gst-plugins-ugly
brew unlink gst-plugins-good
brew unlink gst-plugins-base
brew unlink gst-plugins-bad
brew unlink gst-libav
curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gstreamer.rb && brew install ./gstreamer.rb
curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-python.rb && brew install ./gst-python.rb
curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-plugins-ugly.rb && brew install ./gst-plugins-ugly.rb
curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-plugins-good.rb && brew install ./gst-plugins-good.rb
curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-plugins-base.rb && brew install ./gst-plugins-base.rb
curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-plugins-bad.rb && brew install ./gst-plugins-bad.rb
curl -O https://raw.githubusercontent.com/Homebrew/homebrew-core/49a8667f0c1a6579fe887bc0fa1c0ce682eb01c8/Formula/gst-libav.rb && brew install ./gst-libav.rb
brew pin gstreamer
brew pin gst-python
brew pin gst-plugins-ugly
brew pin gst-plugins-good
brew pin gst-plugins-base
brew pin gst-plugins-bad
brew pin gst-libav
brew unpin gst-python
### END HACK WORKAROUND
# gstreamer plugins
RUN_IGNORE brew install gstreamer gst-plugins-base gst-plugins-good gst-plugins-bad gst-libav
# gst python bindings
RUN_IGNORE brew install gst-python
ARCH=$(arch)
if [ "$ARCH" = "arm64" ]
then

View File

@@ -42,7 +42,7 @@ fi
WATCHTOWER_HTTP_API_TOKEN=$(echo $RANDOM | md5sum)
DOCKER_COMPOSE_YML=$SCRYPTED_HOME/docker-compose.yml
echo "Created $DOCKER_COMPOSE_YML"
curl -s https://raw.githubusercontent.com/koush/scrypted/main/docker/docker-compose.yml | sed s/SET_THIS_TO_SOME_RANDOM_TEXT/"$(echo $RANDOM | md5sum)"/g > $DOCKER_COMPOSE_YML
curl -s https://raw.githubusercontent.com/koush/scrypted/main/docker/docker-compose.yml | sed s/SET_THIS_TO_SOME_RANDOM_TEXT/"$(echo $RANDOM | md5sum | head -c 32)"/g > $DOCKER_COMPOSE_YML
echo "Setting permissions on $SCRYPTED_HOME"
chown -R $SERVICE_USER $SCRYPTED_HOME
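
The sed change above trims md5sum output (which ends in a trailing " -") down to its 32 hex digest characters, so only the digest lands in docker-compose.yml. For illustration only, a Python line that produces a token of the same shape — not what the install script runs:

import secrets

# 32 hex characters, the same shape as `md5sum | head -c 32` output.
print(secrets.token_hex(16))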

View File

@@ -10,7 +10,8 @@ ENV SCRYPTED_INSTALL_PATH="/server"
# changing this forces pip and npm to perform reinstalls.
# if this base image changes, this version must be updated.
ENV SCRYPTED_BASE_VERSION=20230322
ENV SCRYPTED_BASE_VERSION=20230329
ENV SCRYPTED_DOCKER_FLAVOR=full
################################################################
# End section generated from template/Dockerfile.full.footer

View File

@@ -56,7 +56,11 @@ RUN apt-get -y install \
# armv7l does not have wheels for any of these
# and compile times would take forever, if it works at all.
RUN if [ "$(uname -m)" = "armv7l" ]; \
# furthermore, it's possible to run 32bit docker on 64bit arm,
# which causes weird behavior in python: it reads the arch from the
# kernel, which still reports 64bit even when running in 32bit docker.
# this scenario is not supported and will be reported at runtime.
RUN if [ "$(uname -m)" != "x86_64" ]; \
then \
apt-get -y install \
python3-matplotlib \
@@ -70,7 +74,7 @@ RUN if [ "$(uname -m)" = "armv7l" ]; \
RUN python3 -m pip install --upgrade pip
# pyvips is broken on x86 due to mismatched ffi
# https://stackoverflow.com/questions/62658237/it-seems-that-the-version-of-the-libffi-library-seen-at-runtime-is-different-fro
RUN pip install --force-reinstall --no-binary :all: cffi
RUN python3 -m pip install --force-reinstall --no-binary :all: cffi
RUN python3 -m pip install aiofiles debugpy typing_extensions psutil
################################################################

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/amcrest",
"version": "0.0.119",
"version": "0.0.120",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/amcrest",
"version": "0.0.119",
"version": "0.0.120",
"license": "Apache",
"dependencies": {
"@koush/axios-digest-auth": "^0.8.5",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/amcrest",
"version": "0.0.119",
"version": "0.0.120",
"description": "Amcrest Plugin for Scrypted",
"author": "Scrypted",
"license": "Apache",

View File

@@ -616,7 +616,7 @@ class AmcrestProvider extends RtspProvider {
this.console.warn('Error probing two way audio', e);
}
}
settings.newCamera ||= 'Hikvision Camera';
settings.newCamera ||= 'Amcrest Camera';
nativeId = await super.createDevice(settings, nativeId);

View File

@@ -22,6 +22,6 @@
//"scrypted.volumeRoot": "${config:scrypted.serverRoot}/volume",
"python.analysis.extraPaths": [
"./node_modules/@scrypted/sdk/scrypted_python"
"./node_modules/@scrypted/sdk/types/scrypted_python"
]
}

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/arlo",
"version": "0.7.0",
"version": "0.7.7",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/arlo",
"version": "0.7.0",
"version": "0.7.7",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/arlo",
"version": "0.7.0",
"version": "0.7.7",
"description": "Arlo Plugin for Scrypted",
"keywords": [
"scrypted",

View File

@@ -29,7 +29,8 @@ from .sse_stream_async import EventStream
from .logging import logger
# Import all of the other stuff.
from datetime import datetime
from datetime import datetime, timedelta
from cachetools import cached, TTLCache
import asyncio
import sys
@@ -710,7 +711,20 @@ class Arlo(object):
callback,
)
def SirenOn(self, basestation):
def SirenOn(self, basestation, camera=None):
if camera is not None:
resource = f"siren/{camera.get('deviceId')}"
return self.Notify(basestation, {
"action": "set",
"resource": resource,
"publishResponse": True,
"properties": {
"sirenState": "on",
"duration": 300,
"volume": 8,
"pattern": "alarm"
}
})
return self.Notify(basestation, {
"action": "set",
"resource": "siren",
@@ -723,7 +737,20 @@ class Arlo(object):
}
})
def SirenOff(self, basestation):
def SirenOff(self, basestation, camera=None):
if camera is not None:
resource = f"siren/{camera.get('deviceId')}"
return self.Notify(basestation, {
"action": "set",
"resource": resource,
"publishResponse": True,
"properties": {
"sirenState": "off",
"duration": 300,
"volume": 8,
"pattern": "alarm"
}
})
return self.Notify(basestation, {
"action": "set",
"resource": "siren",
@@ -735,3 +762,113 @@ class Arlo(object):
"pattern": "alarm"
}
})
def SpotlightOn(self, basestation, camera):
resource = f"cameras/{camera.get('deviceId')}"
return self.Notify(basestation, {
"action": "set",
"resource": resource,
"publishResponse": True,
"properties": {
"spotlight": {
"enabled": True,
},
},
})
def SpotlightOff(self, basestation, camera):
resource = f"cameras/{camera.get('deviceId')}"
return self.Notify(basestation, {
"action": "set",
"resource": resource,
"publishResponse": True,
"properties": {
"spotlight": {
"enabled": False,
},
},
})
def FloodlightOn(self, basestation, camera):
resource = f"cameras/{camera.get('deviceId')}"
return self.Notify(basestation, {
"action": "set",
"resource": resource,
"publishResponse": True,
"properties": {
"floodlight": {
"on": True,
},
},
})
def FloodlightOff(self, basestation, camera):
resource = f"cameras/{camera.get('deviceId')}"
return self.Notify(basestation, {
"action": "set",
"resource": resource,
"publishResponse": True,
"properties": {
"floodlight": {
"on": False,
},
},
})
def GetLibrary(self, device, from_date: datetime, to_date: datetime):
"""
This call returns the following:
presignedContentUrl is a link to the actual video in Amazon AWS.
presignedThumbnailUrl is a link to the thumbnail .jpg of the actual video in Amazon AWS.
[
{
"mediaDurationSecond": 30,
"contentType": "video/mp4",
"name": "XXXXXXXXXXXXX",
"presignedContentUrl": "https://arlos3-prod-z2.s3.amazonaws.com/XXXXXXX_XXXX_XXXX_XXXX_XXXXXXXXXXXXX/XXX-XXXXXXX/XXXXXXXXXXXXX/recordings/XXXXXXXXXXXXX.mp4?AWSAccessKeyId=XXXXXXXXXXXXXXXXXXXX&Expires=1472968703&Signature=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"lastModified": 1472881430181,
"localCreatedDate": XXXXXXXXXXXXX,
"presignedThumbnailUrl": "https://arlos3-prod-z2.s3.amazonaws.com/XXXXXXX_XXXX_XXXX_XXXX_XXXXXXXXXXXXX/XXX-XXXXXXX/XXXXXXXXXXXXX/recordings/XXXXXXXXXXXXX_thumb.jpg?AWSAccessKeyId=XXXXXXXXXXXXXXXXXXXX&Expires=1472968703&Signature=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"reason": "motionRecord",
"deviceId": "XXXXXXXXXXXXX",
"createdBy": "XXXXXXXXXXXXX",
"createdDate": "20160903",
"timeZone": "America/Chicago",
"ownerId": "XXX-XXXXXXX",
"utcCreatedDate": XXXXXXXXXXXXX,
"currentState": "new",
"mediaDuration": "00:00:30"
}
]
"""
# give the query range a bit of buffer
from_date_internal = from_date - timedelta(days=1)
to_date_internal = to_date + timedelta(days=1)
return [
result for result in
self._getLibraryCached(from_date_internal.strftime("%Y%m%d"), to_date_internal.strftime("%Y%m%d"))
if result["deviceId"] == device["deviceId"]
and datetime.fromtimestamp(int(result["name"]) / 1000.0) <= to_date
and datetime.fromtimestamp(int(result["name"]) / 1000.0) >= from_date
]
@cached(cache=TTLCache(maxsize=512, ttl=60))
def _getLibraryCached(self, from_date: str, to_date: str):
logger.debug(f"Library cache miss for {from_date}, {to_date}")
return self.request.post(
f'https://{self.BASE_URL}/hmsweb/users/library',
{
'dateFrom': from_date,
'dateTo': to_date
}
)
def GetSmartFeatures(self, device):
smart_features = self._getSmartFeaturesCached()
key = f"{device['owner']['ownerId']}_{device['deviceId']}"
return smart_features["features"].get(key)
@cached(cache=TTLCache(maxsize=1, ttl=60))
def _getSmartFeaturesCached(self):
return self.request.get(f'https://{self.BASE_URL}/hmsweb/users/subscription/smart/features')
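
GetLibrary above pads the requested range by a day and memoizes on day-granular date strings, so nearby queries inside the 60-second TTL share one cached POST. A self-contained sketch of that pattern with cachetools, with the endpoint call stubbed out:

from datetime import datetime, timedelta
from cachetools import cached, TTLCache

@cached(cache=TTLCache(maxsize=512, ttl=60))
def fetch_library(date_from: str, date_to: str) -> list:
    # Stub for the POST to /hmsweb/users/library; keyed on the
    # day-granular strings so nearby queries share one cache entry.
    print(f"cache miss: {date_from}..{date_to}")
    return []

def get_library(start: datetime, end: datetime) -> list:
    # Pad by a day so windows that fall on the same days map to the
    # same cache key; the real code then filters to the exact range.
    lo = (start - timedelta(days=1)).strftime("%Y%m%d")
    hi = (end + timedelta(days=1)).strftime("%Y%m%d")
    return fetch_library(lo, hi)

now = datetime.now()
get_library(now - timedelta(hours=1), now)  # miss
get_library(now - timedelta(hours=2), now)  # typically a hit: same day-granular keys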

View File

@@ -1,8 +1,18 @@
from __future__ import annotations
import traceback
from typing import List, TYPE_CHECKING
from scrypted_sdk import ScryptedDeviceBase
from scrypted_sdk.types import Device
from .logging import ScryptedDeviceLoggerMixin
from .util import BackgroundTaskMixin
from .provider import ArloProvider
if TYPE_CHECKING:
# https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
from .provider import ArloProvider
class ArloDeviceBase(ScryptedDeviceBase, ScryptedDeviceLoggerMixin, BackgroundTaskMixin):
nativeId: str = None
@@ -22,11 +32,11 @@ class ArloDeviceBase(ScryptedDeviceBase, ScryptedDeviceLoggerMixin, BackgroundTa
self.provider = provider
self.logger.setLevel(self.provider.get_current_log_level())
def __del__(self):
def __del__(self) -> None:
self.stop_subscriptions = True
self.cancel_pending_tasks()
def get_applicable_interfaces(self) -> list:
def get_applicable_interfaces(self) -> List[str]:
"""Returns the list of Scrypted interfaces that applies to this device."""
return []
@@ -34,7 +44,7 @@ class ArloDeviceBase(ScryptedDeviceBase, ScryptedDeviceLoggerMixin, BackgroundTa
"""Returns the Scrypted device type that applies to this device."""
return ""
def get_device_manifest(self) -> dict:
def get_device_manifest(self) -> Device:
"""Returns the Scrypted device manifest representing this device."""
parent = None
if self.arlo_device.get("parentId") and self.arlo_device["parentId"] != self.arlo_device["deviceId"]:
@@ -54,6 +64,17 @@ class ArloDeviceBase(ScryptedDeviceBase, ScryptedDeviceLoggerMixin, BackgroundTa
"providerNativeId": parent,
}
def get_builtin_child_device_manifests(self) -> list:
def get_builtin_child_device_manifests(self) -> List[Device]:
"""Returns the list of child device manifests representing hardware features built into this device."""
return []
return []
@classmethod
def async_print_exception_guard(self, fn):
"""Decorator to print an exception's stack trace before re-raising the exception."""
async def wrapped(*args, **kwargs):
try:
return await fn(*args, **kwargs)
except Exception:
traceback.print_exc()
raise
return wrapped

View File

@@ -1,20 +1,45 @@
from scrypted_sdk import ScryptedDeviceBase
from scrypted_sdk.types import DeviceProvider, ScryptedInterface, ScryptedDeviceType
from __future__ import annotations
from .device_base import ArloDeviceBase
from .siren import ArloSiren
from typing import List, TYPE_CHECKING
from scrypted_sdk import ScryptedDeviceBase
from scrypted_sdk.types import Device, DeviceProvider, ScryptedInterface, ScryptedDeviceType
from .base import ArloDeviceBase
from .vss import ArloSirenVirtualSecuritySystem
if TYPE_CHECKING:
# https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
from .provider import ArloProvider
class ArloBasestation(ArloDeviceBase, DeviceProvider):
siren: ArloSiren = None
MODELS_WITH_SIRENS = [
"vmb4000",
"vmb4500"
]
def get_applicable_interfaces(self) -> list:
vss: ArloSirenVirtualSecuritySystem = None
def __init__(self, nativeId: str, arlo_basestation: dict, provider: ArloProvider) -> None:
super().__init__(nativeId=nativeId, arlo_device=arlo_basestation, arlo_basestation=arlo_basestation, provider=provider)
@property
def has_siren(self) -> bool:
return any([self.arlo_device["modelId"].lower().startswith(model) for model in ArloBasestation.MODELS_WITH_SIRENS])
def get_applicable_interfaces(self) -> List[str]:
return [ScryptedInterface.DeviceProvider.value]
def get_device_type(self) -> str:
return ScryptedDeviceType.DeviceProvider.value
def get_builtin_child_device_manifests(self) -> list:
def get_builtin_child_device_manifests(self) -> List[Device]:
if not self.has_siren:
# this basestation has no builtin siren, so no manifests to return
return []
vss = self.get_or_create_vss()
return [
{
"info": {
@@ -23,22 +48,24 @@ class ArloBasestation(ArloDeviceBase, DeviceProvider):
"firmware": self.arlo_device.get("firmwareVersion"),
"serialNumber": self.arlo_device["deviceId"],
},
"nativeId": f'{self.arlo_device["deviceId"]}.siren',
"name": f'{self.arlo_device["deviceName"]} Siren',
"interfaces": [ScryptedInterface.OnOff.value],
"type": ScryptedDeviceType.Siren.value,
"nativeId": vss.nativeId,
"name": f'{self.arlo_device["deviceName"]} Siren Virtual Security System',
"interfaces": vss.get_applicable_interfaces(),
"type": vss.get_device_type(),
"providerNativeId": self.nativeId,
}
]
},
] + vss.get_builtin_child_device_manifests()
async def getDevice(self, nativeId: str) -> ScryptedDeviceBase:
if not nativeId.startswith(self.nativeId):
# must be a camera, so get it from the provider
return await self.provider.getDevice(nativeId)
if not nativeId.endswith("vss"):
return None
return self.get_or_create_vss()
if nativeId.endswith("siren"):
if not self.siren:
self.siren = ArloSiren(nativeId, self.arlo_device, self.arlo_basestation, self.provider)
return self.siren
return None
def get_or_create_vss(self) -> ArloSirenVirtualSecuritySystem:
vss_id = f'{self.arlo_device["deviceId"]}.vss'
if not self.vss:
self.vss = ArloSirenVirtualSecuritySystem(vss_id, self.arlo_device, self.arlo_basestation, self.provider, self)
return self.vss

View File

@@ -1,26 +1,68 @@
from __future__ import annotations
import asyncio
from datetime import datetime, timedelta
import json
import threading
import time
from typing import List, TYPE_CHECKING
import scrypted_arlo_go
import scrypted_sdk
from scrypted_sdk.types import Settings, Camera, VideoCamera, MotionSensor, Battery, MediaObject, ScryptedMimeTypes, ScryptedInterface, ScryptedDeviceType
from scrypted_sdk.types import Setting, Settings, Device, Camera, VideoCamera, VideoClips, VideoClip, VideoClipOptions, MotionSensor, Battery, DeviceProvider, MediaObject, ResponsePictureOptions, ResponseMediaStreamOptions, ScryptedMimeTypes, ScryptedInterface, ScryptedDeviceType
from .device_base import ArloDeviceBase
from .provider import ArloProvider
from .base import ArloDeviceBase
from .spotlight import ArloSpotlight, ArloFloodlight
from .vss import ArloSirenVirtualSecuritySystem
from .child_process import HeartbeatChildProcess
from .util import BackgroundTaskMixin
if TYPE_CHECKING:
# https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
from .provider import ArloProvider
class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, DeviceProvider, VideoClips, MotionSensor, Battery):
MODELS_WITH_SPOTLIGHTS = [
"vmc4040p",
"vmc2030",
"vmc2032",
"vmc4041p",
"vmc4050p",
"vmc5040",
"vml2030",
"vml4030",
]
MODELS_WITH_FLOODLIGHTS = ["fb1001"]
MODELS_WITH_SIRENS = [
"vmb4000",
"vmb4500",
"vmb4540",
"vmb5000",
"vmc4040p",
"fb1001",
"vmc2030",
"vmc2020",
"vmc2032",
"vmc4041p",
"vmc4050p",
"vmc5040",
"vml2030",
"vmc4030",
"vml4030",
"vmc4030p",
]
class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, MotionSensor, Battery):
timeout: int = 30
intercom_session = None
light: ArloSpotlight = None
vss: ArloSirenVirtualSecuritySystem = None
def __init__(self, nativeId: str, arlo_device: dict, arlo_basestation: dict, provider: ArloProvider) -> None:
super().__init__(nativeId=nativeId, arlo_device=arlo_device, arlo_basestation=arlo_basestation, provider=provider)
self.start_motion_subscription()
self.start_battery_subscription()
@@ -42,7 +84,7 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, MotionSensor, Ba
self.provider.arlo.SubscribeToBatteryEvents(self.arlo_basestation, self.arlo_device, callback)
)
def get_applicable_interfaces(self) -> list:
def get_applicable_interfaces(self) -> List[str]:
results = set([
ScryptedInterface.VideoCamera.value,
ScryptedInterface.Camera.value,
@@ -59,6 +101,12 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, MotionSensor, Ba
results.add(ScryptedInterface.RTCSignalingChannel.value)
results.discard(ScryptedInterface.Intercom.value)
if self.has_siren or self.has_spotlight or self.has_floodlight:
results.add(ScryptedInterface.DeviceProvider.value)
if self.has_cloud_recording:
results.add(ScryptedInterface.VideoClips.value)
if not self._can_push_to_talk():
results.discard(ScryptedInterface.RTCSignalingChannel.value)
results.discard(ScryptedInterface.Intercom.value)
@@ -68,6 +116,42 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, MotionSensor, Ba
def get_device_type(self) -> str:
return ScryptedDeviceType.Camera.value
def get_builtin_child_device_manifests(self) -> List[Device]:
results = []
if self.has_spotlight or self.has_floodlight:
light = self.get_or_create_spotlight_or_floodlight()
results.append({
"info": {
"model": f"{self.arlo_device['modelId']} {self.arlo_device['properties'].get('hwVersion', '')}".strip(),
"manufacturer": "Arlo",
"firmware": self.arlo_device.get("firmwareVersion"),
"serialNumber": self.arlo_device["deviceId"],
},
"nativeId": light.nativeId,
"name": f'{self.arlo_device["deviceName"]} {"Spotlight" if self.has_spotlight else "Floodlight"}',
"interfaces": light.get_applicable_interfaces(),
"type": light.get_device_type(),
"providerNativeId": self.nativeId,
})
if self.has_siren:
vss = self.get_or_create_vss()
results.extend([
{
"info": {
"model": f"{self.arlo_device['modelId']} {self.arlo_device['properties'].get('hwVersion', '')}".strip(),
"manufacturer": "Arlo",
"firmware": self.arlo_device.get("firmwareVersion"),
"serialNumber": self.arlo_device["deviceId"],
},
"nativeId": vss.nativeId,
"name": f'{self.arlo_device["deviceName"]} Siren Virtual Security System',
"interfaces": vss.get_applicable_interfaces(),
"type": vss.get_device_type(),
"providerNativeId": self.nativeId,
},
] + vss.get_builtin_child_device_manifests())
return results
@property
def webrtc_emulation(self) -> bool:
if self.storage:
@@ -85,7 +169,23 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, MotionSensor, Ba
else:
return True
async def getSettings(self) -> list:
@property
def has_cloud_recording(self) -> bool:
return self.provider.arlo.GetSmartFeatures(self.arlo_device)["planFeatures"]["eventRecording"]
@property
def has_spotlight(self) -> bool:
return any([self.arlo_device["modelId"].lower().startswith(model) for model in ArloCamera.MODELS_WITH_SPOTLIGHTS])
@property
def has_floodlight(self) -> bool:
return any([self.arlo_device["modelId"].lower().startswith(model) for model in ArloCamera.MODELS_WITH_FLOODLIGHTS])
@property
def has_siren(self) -> bool:
return any([self.arlo_device["modelId"].lower().startswith(model) for model in ArloCamera.MODELS_WITH_SIRENS])
async def getSettings(self) -> List[Setting]:
if self._can_push_to_talk():
return [
{
@@ -109,9 +209,9 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, MotionSensor, Ba
async def putSetting(self, key, value) -> None:
if key in ["webrtc_emulation", "two_way_audio"]:
self.storage.setItem(key, value == "true")
await self.provider.discoverDevices()
await self.provider.discover_devices()
async def getPictureOptions(self) -> list:
async def getPictureOptions(self) -> List[ResponsePictureOptions]:
return []
async def takePicture(self, options: dict = None) -> MediaObject:
@@ -131,7 +231,7 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, MotionSensor, Ba
return await scrypted_sdk.mediaManager.createMediaObject(str.encode(pic_url), ScryptedMimeTypes.Url.value)
async def getVideoStreamOptions(self) -> list:
async def getVideoStreamOptions(self) -> List[ResponseMediaStreamOptions]:
return [
{
"id": 'default',
@@ -200,21 +300,110 @@ class ArloCamera(ArloDeviceBase, Settings, Camera, VideoCamera, MotionSensor, Ba
except Exception as e:
self.logger.error(e)
async def startIntercom(self, media):
async def startIntercom(self, media) -> None:
self.logger.info("Starting intercom")
self.intercom_session = ArloCameraRTCSignalingSession(self)
await self.intercom_session.initialize_push_to_talk(media)
async def stopIntercom(self):
async def stopIntercom(self) -> None:
self.logger.info("Stopping intercom")
if self.intercom_session is not None:
await self.intercom_session.shutdown()
self.intercom_session = None
def _can_push_to_talk(self):
def _can_push_to_talk(self) -> bool:
# Right now, only implement push to talk for basestation cameras
return self.arlo_device["deviceId"] != self.arlo_device["parentId"]
async def getVideoClip(self, videoId: str) -> MediaObject:
self.logger.info(f"Getting video clip {videoId}")
id_as_time = int(videoId) / 1000.0
start = datetime.fromtimestamp(id_as_time) - timedelta(seconds=10)
end = datetime.fromtimestamp(id_as_time) + timedelta(seconds=10)
library = self.provider.arlo.GetLibrary(self.arlo_device, start, end)
for recording in library:
if videoId == recording["name"]:
return await scrypted_sdk.mediaManager.createMediaObjectFromUrl(recording["presignedContentUrl"])
self.logger.warn(f"Clip {videoId} not found")
return None
async def getVideoClipThumbnail(self, thumbnailId: str) -> MediaObject:
self.logger.info(f"Getting video clip thumbnail {thumbnailId}")
id_as_time = int(thumbnailId) / 1000.0
start = datetime.fromtimestamp(id_as_time) - timedelta(seconds=10)
end = datetime.fromtimestamp(id_as_time) + timedelta(seconds=10)
library = self.provider.arlo.GetLibrary(self.arlo_device, start, end)
for recording in library:
if thumbnailId == recording["name"]:
return await scrypted_sdk.mediaManager.createMediaObjectFromUrl(recording["presignedThumbnailUrl"])
self.logger.warn(f"Clip thumbnail {thumbnailId} not found")
return None
async def getVideoClips(self, options: VideoClipOptions = None) -> List[VideoClip]:
self.logger.info(f"Fetching remote video clips {options}")
start = datetime.fromtimestamp(options["startTime"] / 1000.0)
end = datetime.fromtimestamp(options["endTime"] / 1000.0)
library = self.provider.arlo.GetLibrary(self.arlo_device, start, end)
clips = []
for recording in library:
clip = {
"duration": recording["mediaDurationSecond"] * 1000.0,
"id": recording["name"],
"thumbnailId": recording["name"],
"videoId": recording["name"],
"startTime": recording["utcCreatedDate"],
"description": recording["reason"],
"resources": {
"thumbnail": {
"href": recording["presignedThumbnailUrl"],
},
"video": {
"href": recording["presignedContentUrl"],
},
},
}
clips.append(clip)
if options.get("reverseOrder"):
clips.reverse()
return clips
@ArloDeviceBase.async_print_exception_guard
async def removeVideoClips(self, videoClipIds: List[str]) -> None:
# Arlo does support deleting, but let's be safe and disable that
raise Exception("deleting Arlo video clips is not implemented by this plugin")
async def getDevice(self, nativeId: str) -> ArloDeviceBase:
if (nativeId.endswith("spotlight") and self.has_spotlight) or (nativeId.endswith("floodlight") and self.has_floodlight):
return self.get_or_create_spotlight_or_floodlight()
if nativeId.endswith("vss") and self.has_siren:
return self.get_or_create_vss()
return None
def get_or_create_spotlight_or_floodlight(self) -> ArloSpotlight:
if self.has_spotlight:
light_id = f'{self.arlo_device["deviceId"]}.spotlight'
if not self.light:
self.light = ArloSpotlight(light_id, self.arlo_device, self.arlo_basestation, self.provider, self)
elif self.has_floodlight:
light_id = f'{self.arlo_device["deviceId"]}.floodlight'
if not self.light:
self.light = ArloFloodlight(light_id, self.arlo_device, self.arlo_basestation, self.provider, self)
return self.light
def get_or_create_vss(self) -> ArloSirenVirtualSecuritySystem:
if self.has_siren:
vss_id = f'{self.arlo_device["deviceId"]}.vss'
if not self.vss:
self.vss = ArloSirenVirtualSecuritySystem(vss_id, self.arlo_device, self.arlo_basestation, self.provider, self)
return self.vss
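
The clip lookups above (getVideoClip / getVideoClipThumbnail) treat the recording name as an epoch-milliseconds timestamp and search a ±10 second window around it. A small sketch of that conversion, assuming the name really is epoch milliseconds as the code implies:

from datetime import datetime, timedelta

video_id = "1472881430181"  # an Arlo recording "name": epoch milliseconds
t = datetime.fromtimestamp(int(video_id) / 1000.0)
start, end = t - timedelta(seconds=10), t + timedelta(seconds=10)
print(start, end)  # the window handed to GetLibrary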
class ArloCameraRTCSignalingSession(BackgroundTaskMixin):
def __init__(self, camera):

View File

@@ -1,13 +1,19 @@
from scrypted_sdk.types import BinarySensor, ScryptedInterface
from __future__ import annotations
from typing import List, TYPE_CHECKING
from scrypted_sdk.types import BinarySensor, ScryptedInterface, ScryptedDeviceType
from .camera import ArloCamera
from .provider import ArloProvider
if TYPE_CHECKING:
# https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
from .provider import ArloProvider
class ArloDoorbell(ArloCamera, BinarySensor):
def __init__(self, nativeId: str, arlo_device: dict, arlo_basestation: dict, provider: ArloProvider) -> None:
super().__init__(nativeId=nativeId, arlo_device=arlo_device, arlo_basestation=arlo_basestation, provider=provider)
self.start_doorbell_subscription()
def start_doorbell_subscription(self) -> None:
@@ -19,7 +25,10 @@ class ArloDoorbell(ArloCamera, BinarySensor):
self.provider.arlo.SubscribeToDoorbellEvents(self.arlo_basestation, self.arlo_device, callback)
)
def get_applicable_interfaces(self) -> list:
def get_device_type(self) -> str:
return ScryptedDeviceType.Doorbell.value
def get_applicable_interfaces(self) -> List[str]:
camera_interfaces = super().get_applicable_interfaces()
camera_interfaces.append(ScryptedInterface.BinarySensor.value)

View File

@@ -6,19 +6,24 @@ import logging
import re
import requests
import traceback
from typing import List
import scrypted_sdk
from scrypted_sdk import ScryptedDeviceBase
from scrypted_sdk.types import Settings, DeviceProvider, DeviceDiscovery, ScryptedInterface, ScryptedDeviceType
from scrypted_sdk.types import Setting, SettingValue, Settings, DeviceProvider, ScryptedInterface
from .arlo import Arlo
from .arlo.arlo_async import change_stream_class
from .arlo.logging import logger as arlo_lib_logger
from .logging import ScryptedDeviceLoggerMixin
from .util import BackgroundTaskMixin
from .camera import ArloCamera
from .doorbell import ArloDoorbell
from .basestation import ArloBasestation
from .base import ArloDeviceBase
class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery, ScryptedDeviceLoggerMixin, BackgroundTaskMixin):
class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, ScryptedDeviceLoggerMixin, BackgroundTaskMixin):
arlo_cameras = None
arlo_basestations = None
_arlo_mfa_code = None
@@ -183,7 +188,7 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
async def do_arlo_setup(self) -> None:
try:
await self.discoverDevices()
await self.discover_devices()
await self.arlo.Subscribe([
(self.arlo_basestations[camera["parentId"]], camera) for camera in self.arlo_cameras.values()
])
@@ -366,7 +371,7 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
self.logger.info(f"Exiting IMAP refresh loop {id(imap_signal)}")
return
async def getSettings(self) -> list:
async def getSettings(self) -> List[Setting]:
results = [
{
"group": "General",
@@ -467,17 +472,16 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
{
"group": "General",
"key": "plugin_verbosity",
"title": "Plugin Verbosity",
"description": "Select the verbosity of this plugin. 'Verbose' will show debugging messages, "
"including events received from connected Arlo cameras.",
"value": self.plugin_verbosity,
"choices": sorted(self.plugin_verbosity_choices.keys()),
"title": "Verbose Logging",
"description": "Enable this option to show debug messages, including events received from connected Arlo cameras.",
"value": self.plugin_verbosity == "Verbose",
"type": "boolean",
},
])
return results
async def putSetting(self, key, value) -> None:
async def putSetting(self, key: str, value: SettingValue) -> None:
if not self.validate_setting(key, value):
await self.onDeviceEvent(ScryptedInterface.Settings.value, None)
return
@@ -488,13 +492,14 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
elif key == "force_reauth":
# force arlo client to be invalidated and reloaded
self.invalidate_arlo_client()
elif key == "plugin_verbosity":
self.storage.setItem(key, "Verbose" if value == "true" else "Normal")
self.propagate_verbosity()
skip_arlo_client = True
else:
self.storage.setItem(key, value)
if key == "plugin_verbosity":
self.propagate_verbosity()
skip_arlo_client = True
elif key == "arlo_transport":
if key == "arlo_transport":
self.propagate_transport()
# force arlo client to be invalidated and reloaded, but
# keep any mfa codes
@@ -523,7 +528,7 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
_ = self.arlo
await self.onDeviceEvent(ScryptedInterface.Settings.value, None)
def validate_setting(self, key: str, val: str) -> bool:
def validate_setting(self, key: str, val: SettingValue) -> bool:
if key == "refresh_interval":
try:
val = int(val)
@@ -553,7 +558,8 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
return False
return True
async def discoverDevices(self, duration: int = 0) -> None:
@ArloDeviceBase.async_print_exception_guard
async def discover_devices(self, duration: int = 0) -> None:
if not self.arlo:
raise Exception("Arlo client not connected, cannot discover devices")
@@ -568,9 +574,10 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
basestations = self.arlo.GetDevices(['basestation', 'siren'])
for basestation in basestations:
nativeId = basestation["deviceId"]
self.logger.debug(f"Adding {nativeId}")
if nativeId in self.arlo_basestations:
self.logger.info(f"Skipping basestation {nativeId} as it already exists")
self.logger.info(f"Skipping basestation {nativeId} ({basestation['modelId']}) as it has already been added")
continue
self.arlo_basestations[nativeId] = basestation
@@ -582,41 +589,55 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
# for basestations, we want to add them to the top level DeviceProvider
provider_to_device_map.setdefault(None, []).append(manifest)
# add any builtin child devices
provider_to_device_map.setdefault(nativeId, []).extend(device.get_builtin_child_device_manifests())
# we also want to trickle discover them so they are added without deleting all existing
# we want to trickle discover them so they are added without deleting all existing
# root level devices - this is for backward compatibility
await scrypted_sdk.deviceManager.onDeviceDiscovered(manifest)
# add any builtin child devices and trickle discover them
child_manifests = device.get_builtin_child_device_manifests()
for child_manifest in child_manifests:
await scrypted_sdk.deviceManager.onDeviceDiscovered(child_manifest)
provider_to_device_map.setdefault(child_manifest["providerNativeId"], []).append(child_manifest)
self.logger.info(f"Discovered {len(basestations)} basestations")
cameras = self.arlo.GetDevices(['camera', "arloq", "arloqs", "doorbell"])
for camera in cameras:
nativeId = camera["deviceId"]
self.logger.debug(f"Adding {nativeId}")
if camera["deviceId"] != camera["parentId"] and camera["parentId"] not in self.arlo_basestations:
self.logger.info(f"Skipping camera {camera['deviceId']} because its basestation was not found")
self.logger.info(f"Skipping camera {camera['deviceId']} ({camera['modelId']}) because its basestation was not found")
continue
nativeId = camera["deviceId"]
if nativeId in self.arlo_cameras:
self.logger.info(f"Skipping camera {nativeId} as it already exists")
self.logger.info(f"Skipping camera {nativeId} ({camera['modelId']}) as it has already been added")
continue
self.arlo_cameras[nativeId] = camera
device = await self.getDevice(nativeId)
scrypted_interfaces = device.get_applicable_interfaces()
manifest = device.get_device_manifest()
self.logger.debug(f"Interfaces for {nativeId} ({camera['modelId']}): {scrypted_interfaces}")
if camera["deviceId"] == camera["parentId"]:
# these are standalone cameras with no basestation, so they act as their
# own basestation
self.arlo_basestations[camera["deviceId"]] = camera
device: ArloDeviceBase = await self.getDevice(nativeId)
scrypted_interfaces = device.get_applicable_interfaces()
manifest = device.get_device_manifest()
self.logger.debug(f"Interfaces for {nativeId} ({camera['modelId']}): {scrypted_interfaces}")
if camera["deviceId"] == camera["parentId"]:
provider_to_device_map.setdefault(None, []).append(manifest)
else:
provider_to_device_map.setdefault(camera["parentId"], []).append(manifest)
# add any builtin child devices
provider_to_device_map.setdefault(nativeId, []).extend(device.get_builtin_child_device_manifests())
# trickle discover this camera so it exists for later steps
await scrypted_sdk.deviceManager.onDeviceDiscovered(manifest)
# add any builtin child devices and trickle discover them
child_manifests = device.get_builtin_child_device_manifests()
for child_manifest in child_manifests:
await scrypted_sdk.deviceManager.onDeviceDiscovered(child_manifest)
provider_to_device_map.setdefault(child_manifest["providerNativeId"], []).append(child_manifest)
camera_devices.append(manifest)
@@ -638,7 +659,7 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
"devices": provider_to_device_map[None]
})
async def getDevice(self, nativeId: str) -> ScryptedDeviceBase:
async def getDevice(self, nativeId: str) -> ArloDeviceBase:
ret = self.scrypted_devices.get(nativeId, None)
if ret is None:
ret = self.create_device(nativeId)
@@ -646,21 +667,19 @@ class ArloProvider(ScryptedDeviceBase, Settings, DeviceProvider, DeviceDiscovery
self.scrypted_devices[nativeId] = ret
return ret
def create_device(self, nativeId: str) -> ScryptedDeviceBase:
from .camera import ArloCamera
from .doorbell import ArloDoorbell
from .basestation import ArloBasestation
def create_device(self, nativeId: str) -> ArloDeviceBase:
if nativeId not in self.arlo_cameras and nativeId not in self.arlo_basestations:
self.logger.warning(f"Cannot create device for nativeId {nativeId}, maybe it hasn't been loaded yet?")
return None
arlo_device = self.arlo_cameras.get(nativeId)
if not arlo_device:
# this is a basestation, so build the basestation object
arlo_device = self.arlo_basestations[nativeId]
return ArloBasestation(nativeId, arlo_device, arlo_device, self)
return ArloBasestation(nativeId, arlo_device, self)
if arlo_device["parentId"] not in self.arlo_basestations:
self.logger.warning(f"Cannot create camera with nativeId {nativeId} when {arlo_device['parentId']} is not a valid basestation")
return None
arlo_basestation = self.arlo_basestations[arlo_device["parentId"]]

View File

@@ -1,17 +1,71 @@
from scrypted_sdk.types import OnOff, ScryptedInterface
from __future__ import annotations
from .device_base import ArloDeviceBase
from typing import List, TYPE_CHECKING
from scrypted_sdk.types import OnOff, SecuritySystemMode, ScryptedInterface, ScryptedDeviceType
from .base import ArloDeviceBase
if TYPE_CHECKING:
# https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
from .provider import ArloProvider
from .vss import ArloSirenVirtualSecuritySystem
class ArloSiren(ArloDeviceBase, OnOff):
vss: ArloSirenVirtualSecuritySystem = None
def get_applicable_interfaces(self) -> list:
def __init__(self, nativeId: str, arlo_device: dict, arlo_basestation: dict, provider: ArloProvider, vss: ArloSirenVirtualSecuritySystem) -> None:
super().__init__(nativeId=nativeId, arlo_device=arlo_device, arlo_basestation=arlo_basestation, provider=provider)
self.vss = vss
def get_applicable_interfaces(self) -> List[str]:
return [ScryptedInterface.OnOff.value]
async def turnOn(self) -> None:
self.logger.info("Turning on")
self.provider.arlo.SirenOn(self.arlo_device)
def get_device_type(self) -> str:
return ScryptedDeviceType.Siren.value
@ArloDeviceBase.async_print_exception_guard
async def turnOn(self) -> None:
from .basestation import ArloBasestation
self.logger.info("Turning on")
if self.vss.securitySystemState["mode"] == SecuritySystemMode.Disarmed.value:
self.logger.info("Virtual security system is disarmed, ignoring trigger")
# set and unset this property to force homekit to display the
# switch as off
self.on = True
self.on = False
self.vss.securitySystemState = {
**self.vss.securitySystemState,
"triggered": False,
}
return
if isinstance(self.vss.parent, ArloBasestation):
self.logger.debug("Parent device is a basestation")
self.provider.arlo.SirenOn(self.arlo_basestation)
else:
self.logger.debug("Parent device is a camera")
self.provider.arlo.SirenOn(self.arlo_basestation, self.arlo_device)
self.on = True
self.vss.securitySystemState = {
**self.vss.securitySystemState,
"triggered": True,
}
@ArloDeviceBase.async_print_exception_guard
async def turnOff(self) -> None:
from .basestation import ArloBasestation
self.logger.info("Turning off")
self.provider.arlo.SirenOff(self.arlo_device)
if isinstance(self.vss.parent, ArloBasestation):
self.provider.arlo.SirenOff(self.arlo_basestation)
else:
self.provider.arlo.SirenOff(self.arlo_basestation, self.arlo_device)
self.on = False
self.vss.securitySystemState = {
**self.vss.securitySystemState,
"triggered": False,
}

View File

@@ -0,0 +1,53 @@
from __future__ import annotations
from typing import List, TYPE_CHECKING
from scrypted_sdk.types import OnOff, ScryptedInterface, ScryptedDeviceType
from .base import ArloDeviceBase
if TYPE_CHECKING:
# https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
from .provider import ArloProvider
from .camera import ArloCamera
class ArloSpotlight(ArloDeviceBase, OnOff):
camera: ArloCamera = None
def __init__(self, nativeId: str, arlo_device: dict, arlo_basestation: dict, provider: ArloProvider, camera: ArloCamera) -> None:
super().__init__(nativeId=nativeId, arlo_device=arlo_device, arlo_basestation=arlo_basestation, provider=provider)
self.camera = camera
def get_applicable_interfaces(self) -> List[str]:
return [ScryptedInterface.OnOff.value]
def get_device_type(self) -> str:
return ScryptedDeviceType.Light.value
@ArloDeviceBase.async_print_exception_guard
async def turnOn(self) -> None:
self.logger.info("Turning on")
self.provider.arlo.SpotlightOn(self.arlo_basestation, self.arlo_device)
self.on = True
@ArloDeviceBase.async_print_exception_guard
async def turnOff(self) -> None:
self.logger.info("Turning off")
self.provider.arlo.SpotlightOff(self.arlo_basestation, self.arlo_device)
self.on = False
class ArloFloodlight(ArloSpotlight):
@ArloDeviceBase.async_print_exception_guard
async def turnOn(self) -> None:
self.logger.info("Turning on")
self.provider.arlo.FloodlightOn(self.arlo_basestation, self.arlo_device)
self.on = True
@ArloDeviceBase.async_print_exception_guard
async def turnOff(self) -> None:
self.logger.info("Turning off")
self.provider.arlo.FloodlightOff(self.arlo_basestation, self.arlo_device)
self.on = False

View File

@@ -2,12 +2,12 @@ import asyncio
class BackgroundTaskMixin:
def create_task(self, coroutine):
def create_task(self, coroutine) -> asyncio.Task:
task = asyncio.get_event_loop().create_task(coroutine)
self.register_task(task)
return task
def register_task(self, task):
def register_task(self, task) -> None:
if not hasattr(self, "background_tasks"):
self.background_tasks = set()
@@ -21,6 +21,8 @@ class BackgroundTaskMixin:
task.add_done_callback(print_exception)
task.add_done_callback(self.background_tasks.discard)
def cancel_pending_tasks(self):
def cancel_pending_tasks(self) -> None:
if not hasattr(self, "background_tasks"):
return
for task in self.background_tasks:
task.cancel()

View File

@@ -0,0 +1,154 @@
from __future__ import annotations
import asyncio
from typing import List, TYPE_CHECKING
from scrypted_sdk.types import Device, DeviceProvider, Setting, Settings, SettingValue, SecuritySystem, SecuritySystemMode, Readme, ScryptedInterface, ScryptedDeviceType
from .base import ArloDeviceBase
from .siren import ArloSiren
if TYPE_CHECKING:
# https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/
from .provider import ArloProvider
from .basestation import ArloBasestation
from .camera import ArloCamera
class ArloSirenVirtualSecuritySystem(ArloDeviceBase, SecuritySystem, DeviceProvider):
"""A virtual, emulated security system that controls when scrypted events can trip the real physical siren."""
SUPPORTED_MODES = [SecuritySystemMode.AwayArmed.value, SecuritySystemMode.HomeArmed.value, SecuritySystemMode.Disarmed.value]
siren: ArloSiren = None
parent: ArloBasestation | ArloCamera = None
def __init__(self, nativeId: str, arlo_device: dict, arlo_basestation: dict, provider: ArloProvider, parent: ArloBasestation | ArloCamera) -> None:
super().__init__(nativeId=nativeId, arlo_device=arlo_device, arlo_basestation=arlo_basestation, provider=provider)
self.parent = parent
self.create_task(self.delayed_init())
@property
def mode(self) -> str:
mode = self.storage.getItem("mode")
if mode is None or mode not in ArloSirenVirtualSecuritySystem.SUPPORTED_MODES:
mode = SecuritySystemMode.Disarmed.value
return mode
@mode.setter
def mode(self, mode: str) -> None:
if mode not in ArloSirenVirtualSecuritySystem.SUPPORTED_MODES:
raise ValueError(f"invalid mode {mode}")
self.storage.setItem("mode", mode)
self.securitySystemState = {
**self.securitySystemState,
"mode": mode,
}
self.create_task(self.onDeviceEvent(ScryptedInterface.Settings.value, None))
async def delayed_init(self) -> None:
iterations = 1
while not self.stop_subscriptions:
if iterations > 100:
self.logger.error("Delayed init exceeded iteration limit, giving up")
return
try:
self.securitySystemState = {
"supportedModes": ArloSirenVirtualSecuritySystem.SUPPORTED_MODES,
"mode": self.mode,
}
return
except Exception as e:
self.logger.debug(f"Delayed init failed, will try again: {e}")
await asyncio.sleep(0.1)
iterations += 1
def get_applicable_interfaces(self) -> List[str]:
return [
ScryptedInterface.SecuritySystem.value,
ScryptedInterface.DeviceProvider.value,
ScryptedInterface.Settings.value,
ScryptedInterface.Readme.value,
]
def get_device_type(self) -> str:
return ScryptedDeviceType.SecuritySystem.value
def get_builtin_child_device_manifests(self) -> List[Device]:
siren = self.get_or_create_siren()
return [
{
"info": {
"model": f"{self.arlo_device['modelId']} {self.arlo_device['properties'].get('hwVersion', '')}".strip(),
"manufacturer": "Arlo",
"firmware": self.arlo_device.get("firmwareVersion"),
"serialNumber": self.arlo_device["deviceId"],
},
"nativeId": siren.nativeId,
"name": f'{self.arlo_device["deviceName"]} Siren',
"interfaces": siren.get_applicable_interfaces(),
"type": siren.get_device_type(),
"providerNativeId": self.nativeId,
}
]
async def getSettings(self) -> List[Setting]:
return [
{
"key": "mode",
"title": "Arm Mode",
"description": "If disarmed, the associated siren will not be physically triggered even if toggled.",
"value": self.mode,
"choices": ArloSirenVirtualSecuritySystem.SUPPORTED_MODES,
},
]
async def putSetting(self, key: str, value: SettingValue) -> None:
if key != "mode":
raise ValueError(f"invalid setting {key}")
self.mode = value
if self.mode == SecuritySystemMode.Disarmed.value:
await self.get_or_create_siren().turnOff()
async def getReadmeMarkdown(self) -> str:
return """
# Virtual Security System for Arlo Sirens
This security system device is not a real physical device, but a virtual, emulated device provided by the Arlo Scrypted plugin. Its purpose is to provide Arm/Disarm security system semantics and avoid accidental, unwanted triggering of the real physical siren through integrations such as Homekit.
To allow the siren to trigger, set the Arm Mode to any of the Armed options. When Disarmed, any triggers of the siren will be ignored. Switching modes will not perform any changes to Arlo cloud or your Arlo account, but rather only to this Scrypted device.
If this virtual security system is synced to Homekit, the siren device will be merged into the same security system accessory as a switch. The siren device will not be added as a separate accessory. To access the siren as a switch without the security system, disable syncing of the virtual security system and enable syncing of the siren, then ensure that the virtual security system is armed manually in its settings in Scrypted.
""".strip()
async def getDevice(self, nativeId: str) -> ArloDeviceBase:
if not nativeId.endswith("siren"):
return None
return self.get_or_create_siren()
def get_or_create_siren(self) -> ArloSiren:
siren_id = f'{self.arlo_device["deviceId"]}.siren'
if not self.siren:
self.siren = ArloSiren(siren_id, self.arlo_device, self.arlo_basestation, self.provider, self)
return self.siren
async def armSecuritySystem(self, mode: SecuritySystemMode) -> None:
self.logger.info(f"Arming {mode}")
self.mode = mode
self.securitySystemState = {
**self.securitySystemState,
"mode": mode,
}
if mode == SecuritySystemMode.Disarmed.value:
await self.get_or_create_siren().turnOff()
@ArloDeviceBase.async_print_exception_guard
async def disarmSecuritySystem(self) -> None:
self.logger.info(f"Disarming")
self.mode = SecuritySystemMode.Disarmed.value
self.securitySystemState = {
**self.securitySystemState,
"mode": SecuritySystemMode.Disarmed.value,
}
await self.get_or_create_siren().turnOff()
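For context on how the arm state gates the hardware: the physical siren consults its parent virtual security system before acting. A minimal sketch of that pattern follows; the vss back-reference and the SirenOn cloud call are illustrative assumptions, not the plugin's exact code.

class ArloSiren(ArloDeviceBase):
    """Sketch: a siren that defers to its parent virtual security system."""

    def __init__(self, nativeId, arlo_device, arlo_basestation, provider, vss) -> None:
        super().__init__(nativeId=nativeId, arlo_device=arlo_device,
                         arlo_basestation=arlo_basestation, provider=provider)
        self.vss = vss  # hypothetical back-reference to the virtual security system

    async def turnOn(self) -> None:
        if self.vss.mode == SecuritySystemMode.Disarmed.value:
            # disarmed: ignore the trigger instead of sounding the real siren
            self.logger.info("Siren trigger ignored while disarmed")
            return
        # armed: forward the request to the Arlo cloud (illustrative call)
        self.provider.arlo.SirenOn(self.arlo_basestation, self.arlo_device)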

View File

@@ -1,6 +1,7 @@
paho-mqtt==1.6.1
sseclient==0.0.22
requests
requests==2.28.2
cachetools==5.3.0
scrypted-arlo-go==0.0.1
--extra-index-url=https://www.piwheels.org/simple/
--extra-index-url=https://bjia56.github.io/scrypted-arlo-go/

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/coreml",
"version": "0.1.5",
"version": "0.1.8",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/coreml",
"version": "0.1.5",
"version": "0.1.8",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -41,5 +41,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.5"
"version": "0.1.8"
}

View File

@@ -9,7 +9,7 @@ from PIL import Image
import asyncio
import concurrent.futures
predictExecutor = concurrent.futures.ThreadPoolExecutor(2, "CoreML-Predict")
predictExecutor = concurrent.futures.ThreadPoolExecutor(8, "CoreML-Predict")
def parse_label_contents(contents: str):
lines = contents.splitlines()

View File

@@ -1 +0,0 @@
../../tensorflow-lite/src/pipeline

View File

@@ -1,10 +1,5 @@
# plugin
Pillow>=5.4.1
PyGObject>=3.30.4
coremltools~=6.1
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
coremltools
# sort_oh
scipy
filterpy
numpy
# pillow for anything not intel linux, pillow-simd is available on x64 linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
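The platform-conditional lines above are PEP 508 environment markers, which pip evaluates against the installing host, so pillow-simd is only selected on x86_64 Linux. A quick way to sanity-check a marker locally, using the packaging library (a separate pip install; shown as a sketch):

from packaging.markers import Marker

# evaluates the marker against the current interpreter's environment
m = Marker("sys_platform == 'linux' and platform_machine == 'x86_64'")
print(m.evaluate())  # True only on x86_64 linux, where pillow-simd applies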

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/homekit",
"version": "1.2.20",
"version": "1.2.21",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/homekit",
"version": "1.2.20",
"version": "1.2.21",
"dependencies": {
"@koush/werift-src": "file:../../external/werift",
"check-disk-space": "^3.3.1",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/homekit",
"version": "1.2.20",
"version": "1.2.21",
"description": "HomeKit Plugin for Scrypted",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",

View File

@@ -1,5 +1,5 @@
import { Deferred } from '@scrypted/common/src/deferred';
import sdk, { AudioSensor, Camera, Intercom, MotionSensor, ObjectsDetected, OnOff, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, VideoCamera, VideoCameraConfiguration } from '@scrypted/sdk';
import sdk, { AudioSensor, Camera, Intercom, MotionSensor, ObjectsDetected, OnOff, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, DeviceProvider, VideoCamera, VideoCameraConfiguration } from '@scrypted/sdk';
import { defaultObjectDetectionContactSensorTimeout } from '../camera-mixin';
import { addSupportedType, bindCharacteristic, DummyDevice } from '../common';
import { AudioRecordingCodec, AudioRecordingCodecType, AudioRecordingSamplerate, AudioStreamingCodec, AudioStreamingCodecType, AudioStreamingSamplerate, CameraController, CameraRecordingConfiguration, CameraRecordingDelegate, CameraRecordingOptions, CameraStreamingOptions, Characteristic, CharacteristicEventTypes, H264Level, H264Profile, MediaContainerType, OccupancySensor, RecordingPacket, Service, SRTPCryptoSuites, VideoCodecType, WithUUID } from '../hap';
@@ -7,7 +7,7 @@ import type { HomeKitPlugin } from '../main';
import { handleFragmentsRequests, iframeIntervalSeconds } from './camera/camera-recording';
import { createCameraStreamingDelegate } from './camera/camera-streaming';
import { FORCE_OPUS } from './camera/camera-utils';
import { makeAccessory } from './common';
import { makeAccessory, mergeOnOffDevicesByType } from './common';
const { deviceManager, systemManager } = sdk;
@@ -303,6 +303,15 @@ addSupportedType({
}
}
if (device.interfaces.includes(ScryptedInterface.DeviceProvider)) {
// merge in lights
const { devices } = mergeOnOffDevicesByType(device as ScryptedDevice as ScryptedDevice & DeviceProvider, accessory, ScryptedDeviceType.Light);
// ensure child devices are skipped by the rest of homekit by
// reporting that they've been merged
devices.map(device => homekitPlugin.mergedDevices.add(device.id));
}
return accessory;
}
});

View File

@@ -15,9 +15,9 @@ import os from 'os';
import { getAddressOverride } from '../../address-override';
import { AudioStreamingCodecType, CameraController, CameraStreamingDelegate, PrepareStreamCallback, PrepareStreamRequest, PrepareStreamResponse, StartStreamRequest, StreamingRequest, StreamRequestCallback, StreamRequestTypes } from '../../hap';
import type { HomeKitPlugin } from "../../main";
import { createReturnAudioSdp } from './camera-return-audio';
import { createSnapshotHandler } from '../camera/camera-snapshot';
import { getDebugMode } from './camera-debug-mode-storage';
import { createReturnAudioSdp } from './camera-return-audio';
import { startCameraStreamFfmpeg } from './camera-streaming-ffmpeg';
import { CameraStreamingSession } from './camera-streaming-session';
import { getStreamingConfiguration } from './camera-utils';
@@ -375,6 +375,12 @@ export function createCameraStreamingDelegate(device: ScryptedDevice & VideoCame
let playing = false;
session.audioReturn.once('message', async buffer => {
try {
const decrypted = srtpSession.decrypt(buffer);
const rtp = RtpPacket.deSerialize(decrypted);
if (rtp.header.payloadType !== session.startRequest.audio.pt)
return;
const { clientPromise, url } = await listenZeroSingleClient();
const rtspUrl = url.replace('tcp', 'rtsp');
let sdp = createReturnAudioSdp(session.startRequest.audio);

View File

@@ -64,6 +64,9 @@ export class H264Repacketizer {
extraPackets = 0;
fuaMax: number;
pendingFuA: RtpPacket[];
// track whether a stapa sps/pps has been seen.
// resets on every idr frame, to trigger codec information
// to be resent.
seenStapASps = false;
fuaMin: number;
@@ -402,8 +405,12 @@ export class H264Repacketizer {
// if this is an idr frame, but no sps has been sent via a stapa, dummy one up.
// the stream may not contain codec information in stapa or may be sending it
// in separate sps/pps packets which is not supported by homekit.
if (originalNalType === NAL_TYPE_IDR && !this.seenStapASps)
this.maybeSendSpsPps(packet, ret);
if (originalNalType === NAL_TYPE_IDR) {
if (!this.seenStapASps)
this.maybeSendSpsPps(packet, ret);
this.seenStapASps = false;
}
}
else {
if (this.pendingFuA) {
@@ -486,10 +493,12 @@ export class H264Repacketizer {
return;
}
if (nalType === NAL_TYPE_IDR && !this.seenStapASps) {
if (nalType === NAL_TYPE_IDR) {
// if this is an idr frame, but no sps has been sent, dummy one up.
// the stream may not contain sps.
this.maybeSendSpsPps(packet, ret);
if (!this.seenStapASps)
this.maybeSendSpsPps(packet, ret);
this.seenStapASps = false;
}
this.fragment(packet, ret);
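The seenStapASps bookkeeping above implements a simple rule: HomeKit expects SPS/PPS codec parameters at every IDR, so the flag is cleared after each IDR frame, forcing the parameters to be synthesized at the next IDR unless the stream delivered them in a STAP-A in the meantime. A compact restatement of that state machine, sketched in Python with stand-in names:

NAL_TYPE_STAP_A, NAL_TYPE_IDR = 24, 5  # stand-in constants

class SpsResendTracker:
    """Sketch of the repacketizer's SPS/PPS resend logic."""

    def __init__(self) -> None:
        self.seen_stapa_sps = False

    def on_nal(self, nal_type: int, stapa_contains_sps: bool, send_sps_pps) -> None:
        if nal_type == NAL_TYPE_STAP_A and stapa_contains_sps:
            # the stream delivered codec information itself
            self.seen_stapa_sps = True
        elif nal_type == NAL_TYPE_IDR:
            if not self.seen_stapa_sps:
                # no SPS/PPS seen since the last IDR: dummy one up
                send_sps_pps()
            # reset so codec information is sent again before the next IDR
            self.seen_stapa_sps = False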

View File

@@ -167,29 +167,30 @@ export function addFan(device: ScryptedDevice & Fan & OnOff, accessory: Accessor
}
/*
* addChildSirens looks for siren-type child devices of the given device provider
* and merges them as switches to the accessory represented by the device provider.
* mergeOnOffDevicesByType looks for the specified type of child devices under the
* given device provider and merges them as switches to the accessory represented
* by the device provider.
*
* Returns the services created as well as all of the child siren devices which have
* Returns the services created as well as all of the child OnOff devices which have
* been merged.
*/
export function addChildSirens(device: ScryptedDevice & DeviceProvider, accessory: Accessory): { services: Service[], devices: (ScryptedDevice & OnOff)[] } {
export function mergeOnOffDevicesByType(device: ScryptedDevice & DeviceProvider, accessory: Accessory, type: ScryptedDeviceType): { services: Service[], devices: (ScryptedDevice & OnOff)[] } {
if (!device.interfaces.includes(ScryptedInterface.DeviceProvider))
return undefined;
const children = getChildDevices(device);
const sirenDevices = [];
const mergedDevices = [];
const services = children.map((child: ScryptedDevice & OnOff) => {
if (child.type !== ScryptedDeviceType.Siren || !child.interfaces.includes(ScryptedInterface.OnOff))
if (child.type !== type || !child.interfaces.includes(ScryptedInterface.OnOff))
return undefined;
const onOffService = getOnOffService(child, accessory, Service.Switch)
sirenDevices.push(child);
mergedDevices.push(child);
return onOffService;
});
return {
services: services.filter(service => !!service),
devices: sirenDevices,
devices: mergedDevices,
};
}

View File

@@ -1,7 +1,7 @@
import { SecuritySystem, SecuritySystemMode, SecuritySystemObstruction, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, DeviceProvider } from '@scrypted/sdk';
import { addSupportedType, bindCharacteristic, DummyDevice } from '../common';
import { Characteristic, CharacteristicEventTypes, CharacteristicSetCallback, CharacteristicValue, Service } from '../hap';
import { makeAccessory, addChildSirens } from './common';
import { makeAccessory, mergeOnOffDevicesByType } from './common';
import type { HomeKitPlugin } from "../main";
addSupportedType({
@@ -90,7 +90,8 @@ addSupportedType({
() => !!device.securitySystemState?.triggered);
if (device.interfaces.includes(ScryptedInterface.DeviceProvider)) {
const { devices } = addChildSirens(device as ScryptedDevice as ScryptedDevice & DeviceProvider, accessory);
// merge in sirens
const { devices } = mergeOnOffDevicesByType(device as ScryptedDevice as ScryptedDevice & DeviceProvider, accessory, ScryptedDeviceType.Siren);
// ensure child devices are skipped by the rest of homekit by
// reporting that they've been merged

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/objectdetector",
"version": "0.0.113",
"version": "0.0.119",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/objectdetector",
"version": "0.0.113",
"version": "0.0.119",
"license": "Apache-2.0",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/objectdetector",
"version": "0.0.113",
"version": "0.0.119",
"description": "Scrypted Video Analysis Plugin. Installed alongside a detection service like OpenCV or TensorFlow.",
"author": "Scrypted",
"license": "Apache-2.0",

View File

@@ -1,6 +1,7 @@
import { Deferred } from "@scrypted/common/src/deferred";
import { ffmpegLogInitialOutput, safeKillFFmpeg } from "@scrypted/common/src/media-helpers";
import { ffmpegLogInitialOutput, safeKillFFmpeg, safePrintFFmpegArguments } from "@scrypted/common/src/media-helpers";
import { readLength, readLine } from "@scrypted/common/src/read-stream";
import { addVideoFilterArguments } from "@scrypted/common/src/ffmpeg-helpers";
import sdk, { FFmpegInput, Image, ImageOptions, MediaObject, ScryptedDeviceBase, ScryptedMimeTypes, VideoFrame, VideoFrameGenerator, VideoFrameGeneratorOptions } from "@scrypted/sdk";
import child_process from 'child_process';
import sharp from 'sharp';
@@ -29,7 +30,7 @@ interface RawFrame {
}
class VipsImage implements Image {
constructor(public image: sharp.Sharp, public width: number, public height: number) {
constructor(public image: sharp.Sharp, public width: number, public height: number, public channels: number) {
}
toImageInternal(options: ImageOptions) {
@@ -54,12 +55,18 @@ class VipsImage implements Image {
async toBuffer(options: ImageOptions) {
const transformed = this.toImageInternal(options);
if (options?.format === 'rgb') {
transformed.removeAlpha().toFormat('raw');
}
else if (options?.format === 'jpg') {
if (options?.format === 'jpg') {
transformed.toFormat('jpg');
}
else {
if (this.channels === 1 && (options?.format === 'gray' || !options.format))
transformed.extractChannel(0);
else if (options?.format === 'gray')
transformed.toColorspace('b-w');
else if (options?.format === 'rgb')
transformed.removeAlpha()
transformed.raw();
}
return transformed.toBuffer();
}
@@ -74,7 +81,7 @@ class VipsImage implements Image {
});
const newMetadata = await newImage.metadata();
const newVipsImage = new VipsImage(newImage, newMetadata.width, newMetadata.height);
const newVipsImage = new VipsImage(newImage, newMetadata.width, newMetadata.height, newMetadata.channels);
return newVipsImage;
}
@@ -89,20 +96,27 @@ class VipsImage implements Image {
export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements VideoFrameGenerator {
async *generateVideoFramesInternal(mediaObject: MediaObject, options?: VideoFrameGeneratorOptions, filter?: (videoFrame: VideoFrame & MediaObject) => Promise<boolean>): AsyncGenerator<VideoFrame & MediaObject, any, unknown> {
const ffmpegInput = await sdk.mediaManager.convertMediaObjectToJSON<FFmpegInput>(mediaObject, ScryptedMimeTypes.FFmpegInput);
const gray = options?.format === 'gray';
const channels = gray ? 1 : 3;
const args = [
'-hide_banner',
//'-hwaccel', 'auto',
...ffmpegInput.inputArguments,
'-vcodec', 'pam',
'-pix_fmt', 'rgb24',
'-pix_fmt', gray ? 'gray' : 'rgb24',
'-f', 'image2pipe',
'pipe:3',
];
// this seems to reduce latency.
addVideoFilterArguments(args, 'fps=10', 'fps');
const cp = child_process.spawn(await sdk.mediaManager.getFFmpegPath(), args, {
stdio: ['pipe', 'pipe', 'pipe', 'pipe'],
});
ffmpegLogInitialOutput(this.console, cp);
const console = mediaObject?.sourceId ? sdk.deviceManager.getMixinConsole(mediaObject.sourceId) : this.console;
safePrintFFmpegArguments(console, args);
ffmpegLogInitialOutput(console, cp);
let finished = false;
let frameDeferred: Deferred<RawFrame>;
@@ -121,7 +135,7 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
}
if (headers['TUPLTYPE'] !== 'RGB')
if (headers['TUPLTYPE'] !== 'RGB' && headers['TUPLTYPE'] !== 'GRAYSCALE')
throw new Error(`Unexpected TUPLTYPE in PAM stream: ${headers['TUPLTYPE']}`);
const width = parseInt(headers['WIDTH']);
@@ -129,7 +143,7 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
if (!width || !height)
throw new Error('Invalid dimensions in PAM stream');
const length = width * height * 3;
const length = width * height * channels;
headers.clear();
const data = await readLength(readable, length);
@@ -150,7 +164,7 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
catch (e) {
}
finally {
this.console.log('finished reader');
console.log('finished reader');
finished = true;
frameDeferred?.reject(new Error('frame generator finished'));
}
@@ -167,20 +181,24 @@ export class FFmpegVideoFrameGenerator extends ScryptedDeviceBase implements Vid
raw: {
width,
height,
channels: 3,
channels,
}
});
const vipsImage = new VipsImage(image, width, height);
const mo = await createVipsMediaObject(vipsImage);
yield mo;
vipsImage.image.destroy();
vipsImage.image = undefined;
const vipsImage = new VipsImage(image, width, height, channels);
try {
const mo = await createVipsMediaObject(vipsImage);
yield mo;
}
finally {
vipsImage.image = undefined;
image.destroy();
}
}
}
catch (e) {
}
finally {
this.console.log('finished generator');
console.log('finished generator');
finished = true;
safeKillFFmpeg(cp);
}
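For reference, the PAM frames ffmpeg writes to pipe:3 above are self-framing: a small text header (P7 through ENDHDR) followed by exactly width * height * channels bytes of raw pixels, which is what the length computation relies on. A rough reader for a single frame, assuming a blocking binary stream:

def read_pam_frame(stream):
    """Read one PAM (P7) image from a binary stream; sketch only."""
    assert stream.readline().strip() == b'P7'
    headers = {}
    while True:
        line = stream.readline().strip()
        if line == b'ENDHDR':
            break
        key, _, value = line.partition(b' ')
        headers[key.decode()] = value.decode()
    width, height = int(headers['WIDTH']), int(headers['HEIGHT'])
    channels = 1 if headers['TUPLTYPE'] == 'GRAYSCALE' else 3
    data = stream.read(width * height * channels)
    return width, height, channels, data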

View File

@@ -1,10 +1,9 @@
import sdk, { Camera, DeviceProvider, DeviceState, EventListenerRegister, MediaObject, MediaStreamDestination, MixinDeviceBase, MixinProvider, MotionSensor, ObjectDetection, ObjectDetectionCallbacks, ObjectDetectionModel, ObjectDetectionResult, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, ScryptedNativeId, Setting, Settings, SettingValue, VideoCamera, VideoFrame, VideoFrameGenerator } from '@scrypted/sdk';
import sdk, { Camera, DeviceProvider, DeviceState, EventListenerRegister, MediaObject, MediaStreamDestination, MixinDeviceBase, MixinProvider, MotionSensor, ObjectDetection, ObjectDetectionModel, ObjectDetectionResult, ObjectDetectionTypes, ObjectDetector, ObjectsDetected, ScryptedDevice, ScryptedDeviceType, ScryptedInterface, ScryptedMimeTypes, ScryptedNativeId, Setting, Settings, SettingValue, VideoCamera, VideoFrame, VideoFrameGenerator } from '@scrypted/sdk';
import { StorageSettings } from '@scrypted/sdk/storage-settings';
import crypto from 'crypto';
import cloneDeep from 'lodash/cloneDeep';
import { AutoenableMixinProvider } from "../../../common/src/autoenable-mixin-provider";
import { SettingsMixinDeviceBase } from "../../../common/src/settings-mixin";
import { DenoisedDetectionEntry, DenoisedDetectionState, denoiseDetections } from './denoise';
import { DenoisedDetectionState } from './denoise';
import { FFmpegVideoFrameGenerator } from './ffmpeg-videoframes';
import { serverSupportsMixinEventMasking } from './server-version';
import { sleep } from './sleep';
@@ -19,8 +18,6 @@ const defaultDetectionDuration = 20;
const defaultDetectionInterval = 60;
const defaultDetectionTimeout = 60;
const defaultMotionDuration = 10;
const defaultScoreThreshold = .2;
const defaultSecondScoreThreshold = .7;
const BUILTIN_MOTION_SENSOR_ASSIST = 'Assist';
const BUILTIN_MOTION_SENSOR_REPLACE = 'Replace';
@@ -44,9 +41,8 @@ type TrackedDetection = ObjectDetectionResult & {
bestSecondPassScore?: number;
};
class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera & MotionSensor & ObjectDetector> implements ObjectDetector, Settings, ObjectDetectionCallbacks {
class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera & MotionSensor & ObjectDetector> implements ObjectDetector, Settings {
motionListener: EventListenerRegister;
detectorListener: EventListenerRegister;
motionMixinListener: EventListenerRegister;
detections = new Map<string, MediaObject>();
cameraDevice: ScryptedDevice & Camera & VideoCamera & MotionSensor & ObjectDetector;
@@ -81,16 +77,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
this.maybeStartMotionDetection();
}
},
captureMode: {
title: 'Capture Mode',
description: 'The method to capture frames for analysis. Video will require more processing power.',
choices: [
'Default',
'Video',
'Snapshot',
],
defaultValue: 'Default',
},
detectionDuration: {
title: 'Detection Duration',
subgroup: 'Advanced',
@@ -121,23 +107,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
defaultValue: defaultDetectionInterval,
hide: true,
},
scoreThreshold: {
title: 'Minimum Detection Confidence',
subgroup: 'Advanced',
description: 'Higher values eliminate false positives and low quality recognition candidates.',
type: 'number',
placeholder: '.2',
defaultValue: defaultScoreThreshold,
},
secondScoreThreshold: {
title: 'Second Pass Confidence',
subgroup: 'Advanced',
description: 'Crop and reanalyze a result from the initial detection pass to get more accurate results.',
key: 'secondScoreThreshold',
type: 'number',
defaultValue: defaultSecondScoreThreshold,
placeholder: '.7',
},
});
motionTimeout: NodeJS.Timeout;
zones = this.getZones();
@@ -178,7 +147,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
if (this.hasMotionType) {
// force a motion detection restart if it quit
if (this.motionSensorSupplementation === BUILTIN_MOTION_SENSOR_REPLACE)
await this.startStreamAnalysis();
await this.startPipelineAnalysis();
return;
}
}, this.storageSettings.values.detectionInterval * 1000);
@@ -216,30 +185,16 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
return ret;
}
async snapshotDetection() {
const picture = await this.cameraDevice.takePicture();
let detections = await this.objectDetection.detectObjects(picture, {
detectionId: this.detectionId,
settings: this.getCurrentSettings(),
});
detections = await this.trackObjects(detections, true);
this.reportObjectDetections(detections);
}
async maybeStartMotionDetection() {
if (!this.hasMotionType)
return;
if (this.motionSensorSupplementation !== BUILTIN_MOTION_SENSOR_REPLACE)
return;
await this.startStreamAnalysis();
await this.startPipelineAnalysis();
}
endObjectDetection() {
this.detectorRunning = false;
this.objectDetection?.detectObjects(undefined, {
detectionId: this.detectionId,
settings: this.getCurrentSettings(),
});
}
bindObjectDetection() {
@@ -247,60 +202,30 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
this.motionDetected = false;
this.detectorRunning = false;
this.detectorListener?.removeListener();
this.detectorListener = undefined;
this.endObjectDetection();
this.maybeStartMotionDetection();
}
async register() {
const model = await this.objectDetection.getDetectionModel();
if (!this.hasMotionType) {
if (model.triggerClasses?.includes('motion')) {
this.motionListener = this.cameraDevice.listen(ScryptedInterface.MotionSensor, async () => {
if (!this.cameraDevice.motionDetected) {
if (this.detectorRunning) {
// allow analysis due to user request.
if (this.analyzeStop > Date.now())
return;
this.console.log('motion stopped, cancelling ongoing detection')
this.endObjectDetection();
}
return;
}
await this.startStreamAnalysis();
});
}
const nonMotion = model.triggerClasses?.find(t => t !== 'motion');
if (nonMotion) {
this.detectorListener = this.cameraDevice.listen(ScryptedInterface.ObjectDetector, async (s, d, data: ObjectsDetected) => {
if (!model.triggerClasses)
return;
if (!data.detectionId)
return;
const { detections } = data;
if (!detections?.length)
return;
const set = new Set(detections.map(d => d.className));
for (const trigger of model.triggerClasses) {
if (trigger === 'motion')
continue;
if (set.has(trigger)) {
const jpeg = await this.cameraDevice.getDetectionInput(data.detectionId, data.eventId);
const found = await this.objectDetection.detectObjects(jpeg);
found.detectionId = data.detectionId;
this.handleDetectionEvent(found, undefined, jpeg);
this.motionListener = this.cameraDevice.listen(ScryptedInterface.MotionSensor, async () => {
if (!this.cameraDevice.motionDetected) {
if (this.detectorRunning) {
// allow analysis due to user request.
if (this.analyzeStop > Date.now())
return;
}
this.console.log('motion stopped, cancelling ongoing detection')
this.endObjectDetection();
}
});
}
return;
}
await this.startPipelineAnalysis();
});
return;
}
@@ -317,7 +242,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
return;
if (!this.detectorRunning)
this.console.log('built in motion sensor started motion, starting video detection.');
await this.startStreamAnalysis();
await this.startPipelineAnalysis();
return;
}
@@ -332,163 +257,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
}
}
async handleDetectionEvent(detection: ObjectsDetected, redetect?: (boundingBox: [number, number, number, number]) => Promise<ObjectDetectionResult[]>, mediaObject?: MediaObject) {
this.detectorRunning = detection.running;
detection = await this.trackObjects(detection);
// apply the zones to the detections and get a shallow copy list of detections after
// exclusion zones have applied
const zonedDetections = this.applyZones(detection)
.filter(d => {
if (!d.zones?.length)
return d.bestSecondPassScore >= this.secondScoreThreshold || d.score >= this.scoreThreshold;
for (const zone of d.zones || []) {
const zi = this.zoneInfos[zone];
const scoreThreshold = zi?.scoreThreshold || this.scoreThreshold;
const secondScoreThreshold = zi?.secondScoreThreshold || this.secondScoreThreshold;
// keep the object if it passes the score check, or has already passed a second score check.
if (d.bestSecondPassScore >= secondScoreThreshold || d.score >= scoreThreshold)
return true;
}
});
let retainImage = false;
if (!this.hasMotionType && redetect && this.secondScoreThreshold && detection.detections) {
const detections = detection.detections as TrackedDetection[];
const newOrBetterDetections = zonedDetections.filter(d => d.newOrBetterDetection);
detections?.forEach(d => d.newOrBetterDetection = false);
// anything with a new or better initial score should be redetected
// as it may yield a better second pass score and thus a better thumbnail.
await Promise.allSettled(newOrBetterDetections.map(async d => {
const maybeUpdateSecondPassScore = (secondPassScore: number) => {
let better = false;
// initialize second pass result
if (!d.bestSecondPassScore) {
better = true;
d.bestSecondPassScore = 0;
}
// retain the image upon passing the second pass threshold for the first time.
if (d.bestSecondPassScore < this.secondScoreThreshold && secondPassScore >= this.secondScoreThreshold) {
this.console.log('improved', d.id, secondPassScore, d.score);
better = true;
retainImage = true;
}
else if (secondPassScore > d.bestSecondPassScore * 1.1) {
this.console.log('improved', d.id, secondPassScore, d.score);
better = true;
retainImage = true;
}
if (better)
d.bestSecondPassScore = secondPassScore;
return better;
}
// the initial score may be sufficient.
if (d.score >= this.secondScoreThreshold) {
maybeUpdateSecondPassScore(d.score);
return;
}
const redetected = await redetect(d.boundingBox);
const best = redetected.filter(r => r.className === d.className).sort((a, b) => b.score - a.score)?.[0];
if (best) {
if (maybeUpdateSecondPassScore(best.score)) {
d.boundingBox = best.boundingBox;
}
}
}));
const secondPassDetections = zonedDetections.filter(d => d.bestSecondPassScore >= this.secondScoreThreshold)
.map(d => ({
...d,
score: d.bestSecondPassScore,
}));
detection.detections = secondPassDetections;
}
else {
detection.detections = zonedDetections;
}
if (detection.detections) {
const trackedDetections = cloneDeep(detection.detections) as TrackedDetection[];
for (const d of trackedDetections) {
delete d.bestScore;
delete d.bestSecondPassScore;
delete d.newOrBetterDetection;
}
detection.detections = trackedDetections;
}
const now = Date.now();
if (this.lastDetectionInput + this.storageSettings.values.detectionTimeout * 1000 < Date.now())
retainImage = true;
if (retainImage && mediaObject) {
this.lastDetectionInput = now;
this.setDetection(detection, mediaObject);
}
this.reportObjectDetections(detection);
return retainImage;
}
get scoreThreshold() {
return parseFloat(this.storage.getItem('scoreThreshold')) || defaultScoreThreshold;
}
get secondScoreThreshold() {
const r = parseFloat(this.storage.getItem('secondScoreThreshold'));
if (isNaN(r))
return defaultSecondScoreThreshold;
return r;
}
async onDetection(detection: ObjectsDetected, redetect?: (boundingBox: [number, number, number, number]) => Promise<ObjectDetectionResult[]>, mediaObject?: MediaObject): Promise<boolean> {
// detection.detections = detection.detections?.filter(d => d.score >= this.scoreThreshold);
return this.handleDetectionEvent(detection, redetect, mediaObject);
}
async onDetectionEnded(detection: ObjectsDetected): Promise<void> {
this.handleDetectionEvent(detection);
}
async startSnapshotAnalysis() {
if (this.detectorRunning)
return;
this.detectorRunning = true;
this.analyzeStop = Date.now() + this.getDetectionDuration();
while (this.detectorRunning) {
const now = Date.now();
if (now > this.analyzeStop)
break;
try {
const mo = await this.mixinDevice.takePicture({
reason: 'event',
});
const found = await this.objectDetection.detectObjects(mo, {
detectionId: this.detectionId,
duration: this.getDetectionDuration(),
settings: this.getCurrentSettings(),
}, this);
}
catch (e) {
this.console.error('snapshot detection error', e);
}
// cameras tend to only refresh every 1s at best.
// maybe get this value from somewhere? or sha the jpeg?
const diff = now + 1100 - Date.now();
if (diff > 0)
await sleep(diff);
}
this.endObjectDetection();
}
async startPipelineAnalysis() {
if (this.detectorRunning)
return;
@@ -573,20 +341,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
// apply the zones to the detections and get a shallow copy list of detections after
// exclusion zones have applied
const zonedDetections = this.applyZones(detected.detected);
const filteredDetections = zonedDetections
.filter(d => {
if (!d.zones?.length)
return d.score >= this.scoreThreshold;
for (const zone of d.zones || []) {
const zi = this.zoneInfos[zone];
const scoreThreshold = zi?.scoreThreshold || this.scoreThreshold;
if (d.score >= scoreThreshold)
return true;
}
});
detected.detected.detections = filteredDetections;
detected.detected.detections = zonedDetections;
detections++;
// this.console.warn('dps', detections / (Date.now() - start) * 1000);
@@ -615,79 +370,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
}
}
async startStreamAnalysis() {
if (this.newPipeline) {
await this.startPipelineAnalysis();
}
else if (!this.hasMotionType && this.storageSettings.values.captureMode === 'Snapshot') {
await this.startSnapshotAnalysis();
}
else {
await this.startVideoDetection();
}
}
async extendedObjectDetect(force?: boolean) {
if (!this.hasMotionType && this.storageSettings.values.captureMode === 'Snapshot') {
this.analyzeStop = Date.now() + this.getDetectionDuration();
}
else {
try {
if (!force && !this.motionDetected)
return;
await this.objectDetection?.detectObjects(undefined, {
detectionId: this.detectionId,
duration: this.getDetectionDuration(),
settings: this.getCurrentSettings(),
}, this);
}
catch (e) {
// ignore any errors
}
}
}
async startVideoDetection() {
try {
const settings = this.getCurrentSettings();
// prevent stream retrieval noise until notified that the detection is no longer running.
if (this.detectorRunning) {
const session = await this.objectDetection?.detectObjects(undefined, {
detectionId: this.detectionId,
duration: this.getDetectionDuration(),
settings,
}, this);
this.detectorRunning = session.running;
if (this.detectorRunning)
return;
}
// dummy up the last detection time to prevent the idle timers from purging everything.
this.detectionState.lastDetection = Date.now();
this.detectorRunning = true;
let stream: MediaObject;
stream = await this.cameraDevice.getVideoStream({
destination: !this.hasMotionType ? 'local-recorder' : 'low-resolution',
// ask rebroadcast to mute audio, not needed.
audio: null,
});
const session = await this.objectDetection?.detectObjects(stream, {
detectionId: this.detectionId,
duration: this.getDetectionDuration(),
settings,
}, this);
this.detectorRunning = session.running;
}
catch (e) {
this.console.log('failure retrieving stream', e);
this.detectorRunning = false;
}
}
normalizeBox(boundingBox: [number, number, number, number], inputDimensions: [number, number]) {
let [x, y, width, height] = boundingBox;
let x2 = x + width;
@@ -806,88 +488,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
this.onDeviceEvent(ScryptedInterface.ObjectDetector, detection);
}
async trackObjects(detectionResult: ObjectsDetected, showAll?: boolean) {
// do not denoise
if (this.hasMotionType) {
return detectionResult;
}
if (!detectionResult?.detections) {
// detection session ended.
return detectionResult;
}
const { detections } = detectionResult;
const found: DenoisedDetectionEntry<TrackedDetection>[] = [];
denoiseDetections<TrackedDetection>(this.detectionState, detections.map(detection => ({
get id() {
return detection.id;
},
set id(id) {
detection.id = id;
},
name: detection.className,
score: detection.score,
detection,
get firstSeen() {
return detection.history?.firstSeen
},
set firstSeen(value) {
detection.history = detection.history || {
firstSeen: value,
lastSeen: value,
};
detection.history.firstSeen = value;
},
get lastSeen() {
return detection.history?.lastSeen
},
set lastSeen(value) {
detection.history = detection.history || {
firstSeen: value,
lastSeen: value,
};
detection.history.lastSeen = value;
},
boundingBox: detection.boundingBox,
})), {
timeout: this.storageSettings.values.detectionTimeout * 1000,
added: d => {
found.push(d);
d.detection.bestScore = d.detection.score;
d.detection.newOrBetterDetection = true;
},
removed: d => {
this.console.log('expired detection:', `${d.detection.className} (${d.detection.score})`);
if (detectionResult.running)
this.extendedObjectDetect();
},
retained: (d, o) => {
if (d.detection.score > o.detection.bestScore) {
d.detection.bestScore = d.detection.score;
d.detection.newOrBetterDetection = true;
}
else {
d.detection.bestScore = o.detection.bestScore;
}
d.detection.bestSecondPassScore = o.detection.bestSecondPassScore;
},
expiring: (d) => {
},
});
if (found.length) {
this.console.log('new detection:', found.map(d => `${d.id} ${d.detection.className} (${d.detection.score})`).join(', '));
if (detectionResult.running)
this.extendedObjectDetect();
}
if (found.length || showAll) {
this.console.log('current detections:', this.detectionState.previousDetections.map(d => `${d.detection.className} (${d.detection.score}, ${d.detection.boundingBox?.join(', ')})`).join(', '));
}
return detectionResult;
}
setDetection(detection: ObjectsDetected, detectionInput: MediaObject) {
if (!detection.detectionId)
detection.detectionId = crypto.randomBytes(4).toString('hex');
@@ -942,9 +542,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
}
get newPipeline() {
if (!this.plugin.storageSettings.values.newPipeline)
return;
const newPipeline = this.storageSettings.values.newPipeline;
if (!newPipeline)
return newPipeline;
@@ -979,8 +576,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
}
this.storageSettings.settings.motionSensorSupplementation.hide = !this.hasMotionType || !this.mixinDeviceInterfaces.includes(ScryptedInterface.MotionSensor);
this.storageSettings.settings.captureMode.hide = this.hasMotionType || !!this.plugin.storageSettings.values.newPipeline;
this.storageSettings.settings.newPipeline.hide = this.hasMotionType || !this.plugin.storageSettings.values.newPipeline;
this.storageSettings.settings.detectionDuration.hide = this.hasMotionType;
this.storageSettings.settings.detectionTimeout.hide = this.hasMotionType;
this.storageSettings.settings.motionDuration.hide = !this.hasMotionType;
@@ -988,23 +583,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
settings.push(...await this.storageSettings.getSettings());
let hideThreshold = true;
if (!this.hasMotionType) {
let hasInclusionZone = false;
for (const zone of Object.keys(this.zones)) {
const zi = this.zoneInfos[zone];
if (!zi?.exclusion) {
hasInclusionZone = true;
break;
}
}
if (!hasInclusionZone) {
hideThreshold = false;
}
}
this.storageSettings.settings.scoreThreshold.hide = hideThreshold;
this.storageSettings.settings.secondScoreThreshold.hide = hideThreshold;
settings.push({
key: 'zones',
title: 'Zones',
@@ -1048,38 +626,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
],
value: zi?.type || 'Intersect',
});
if (!this.hasMotionType) {
settings.push(
{
subgroup,
key: `zoneinfo-classes-${name}`,
title: `Detection Classes`,
description: 'The detection classes to match inside this zone. An empty list will match all classes.',
choices: (await this.getObjectTypes())?.classes || [],
value: zi?.classes || [],
multiple: true,
},
{
subgroup,
title: 'Minimum Detection Confidence',
description: 'Higher values eliminate false positives and low quality recognition candidates.',
key: `zoneinfo-scoreThreshold-${name}`,
type: 'number',
value: zi?.scoreThreshold || this.scoreThreshold,
placeholder: '.2',
},
{
subgroup,
title: 'Second Pass Confidence',
description: 'Crop and reanalyze a result from the initial detection pass to get more accurate results.',
key: `zoneinfo-secondScoreThreshold-${name}`,
type: 'number',
value: zi?.secondScoreThreshold || this.secondScoreThreshold,
placeholder: '.7',
},
);
}
}
if (!this.hasMotionType) {
@@ -1157,7 +703,7 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
if (key === 'analyzeButton') {
this.analyzeStop = Date.now() + 60000;
// await this.snapshotDetection();
await this.startStreamAnalysis();
await this.startPipelineAnalysis();
}
else {
const settings = this.getCurrentSettings();
@@ -1175,7 +721,6 @@ class ObjectDetectionMixin extends SettingsMixinDeviceBase<VideoCamera & Camera
this.clearMotionTimeout();
this.motionListener?.removeListener();
this.motionMixinListener?.removeListener();
this.detectorListener?.removeListener();
this.endObjectDetection();
}
}
@@ -1246,11 +791,6 @@ class ObjectDetectionPlugin extends AutoenableMixinProvider implements Settings,
currentMixins = new Set<ObjectDetectorMixin>();
storageSettings = new StorageSettings(this, {
newPipeline: {
title: 'New Video Pipeline',
description: 'WARNING! DO NOT ENABLE: Use the new video pipeline. Leave blank to use the legacy pipeline.',
type: 'boolean',
},
activeMotionDetections: {
title: 'Active Motion Detection Sessions',
readonly: true,

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/opencv",
"version": "0.0.69",
"version": "0.0.72",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/opencv",
"version": "0.0.69",
"version": "0.0.72",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -36,5 +36,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.0.69"
"version": "0.0.72"
}

View File

@@ -1,22 +1,21 @@
from __future__ import annotations
from time import sleep
from detect import DetectionSession, DetectPlugin
from typing import Any, List, Tuple
import numpy as np
import asyncio
import cv2
import imutils
Gst = None
try:
from gi.repository import Gst
except:
pass
from scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected, Setting, VideoFrame
from PIL import Image
import numpy as np
class OpenCVDetectionSession(DetectionSession):
from detect import DetectPlugin
import scrypted_sdk
from scrypted_sdk.types import (ObjectDetectionGeneratorSession,
ObjectDetectionResult,
ObjectsDetected, Setting, VideoFrame)
class OpenCVDetectionSession:
def __init__(self) -> None:
super().__init__()
self.cap: cv2.VideoCapture = None
self.previous_frame: Any = None
self.curFrame = None
@@ -110,8 +109,7 @@ class OpenCVPlugin(DetectPlugin):
blur = int(settings.get('blur', blur))
return area, threshold, interval, blur
def detect(self, detection_session: OpenCVDetectionSession, frame, src_size, convert_to_src_size) -> ObjectsDetected:
settings = detection_session.settings
def detect(self, frame, settings: Any, detection_session: OpenCVDetectionSession, src_size, convert_to_src_size) -> ObjectsDetected:
area, threshold, interval, blur = self.parse_settings(settings)
# see get_detection_input_size on undocumented size requirements for GRAY8
@@ -154,8 +152,8 @@ class OpenCVPlugin(DetectPlugin):
# if w * h != contour_area:
# print("mismatch w/h", contour_area - w * h)
x2, y2, _ = convert_to_src_size((x + w, y + h))
x, y, _ = convert_to_src_size((x, y))
x2, y2 = convert_to_src_size((x + w, y + h))
x, y = convert_to_src_size((x, y))
w = x2 - x + 1
h = y2 - y + 1
@@ -206,11 +204,24 @@ class OpenCVPlugin(DetectPlugin):
detection_session.cap = None
return super().end_session(detection_session)
async def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
# todo
raise Exception('can not run motion detection on image')
async def run_detection_videoframe(self, videoFrame: VideoFrame, detection_session: OpenCVDetectionSession) -> ObjectsDetected:
async def generateObjectDetections(self, videoFrames: Any, session: ObjectDetectionGeneratorSession = None) -> Any:
try:
ds = OpenCVDetectionSession()
videoFrames = await scrypted_sdk.sdk.connectRPCObject(videoFrames)
async for videoFrame in videoFrames:
detected = await self.run_detection_videoframe(videoFrame, session and session.get('settings'), ds)
yield {
'__json_copy_serialize_children': True,
'detected': detected,
'videoFrame': videoFrame,
}
finally:
try:
await videoFrames.aclose()
except:
pass
async def run_detection_videoframe(self, videoFrame: VideoFrame, settings: Any, detection_session: OpenCVDetectionSession) -> ObjectsDetected:
width = videoFrame.width
height = videoFrame.height
@@ -238,60 +249,8 @@ class OpenCVPlugin(DetectPlugin):
'resize': resize,
})
def convert_to_src_size(point, normalize = False):
return point[0] * scale, point[1] * scale, True
def convert_to_src_size(point):
return point[0] * scale, point[1] * scale
mat = np.ndarray((height, width, self.pixelFormatChannelCount), buffer=buffer, dtype=np.uint8)
detections = self.detect(
detection_session, mat, (width, height), convert_to_src_size)
detections = self.detect(mat, settings, detection_session, (width, height), convert_to_src_size)
return detections
async def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
if avframe.format.name != 'yuv420p' and avframe.format.name != 'yuvj420p':
mat = avframe.to_ndarray(format='gray8')
else:
mat = np.ndarray((avframe.height, avframe.width, self.pixelFormatChannelCount), buffer=avframe.planes[0], dtype=np.uint8)
detections = self.detect(
detection_session, mat, src_size, convert_to_src_size)
if not detections or not len(detections['detections']):
await self.detection_sleep(settings)
return None, None
return detections, None
async def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
buf = gst_sample.get_buffer()
caps = gst_sample.get_caps()
# can't trust the width value, compute the stride
height = caps.get_structure(0).get_value('height')
width = caps.get_structure(0).get_value('width')
result, info = buf.map(Gst.MapFlags.READ)
if not result:
return None, None
try:
mat = np.ndarray(
(height,
width,
self.pixelFormatChannelCount),
buffer=info.data,
dtype=np.uint8)
detections = self.detect(
detection_session, mat, src_size, convert_to_src_size)
# no point in triggering empty events.
finally:
buf.unmap(info)
if not detections or not len(detections['detections']):
await self.detection_sleep(settings)
return None, None
return detections, None
def create_detection_session(self):
return OpenCVDetectionSession()
async def detection_sleep(self, settings: Any):
area, threshold, interval, blur = self.parse_settings(settings)
# it is safe to block here because gstreamer creates a queue thread
await asyncio.sleep(interval / 1000)
async def detection_event_notified(self, settings: Any):
await self.detection_sleep(settings)
return await super().detection_event_notified(settings)
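The generateObjectDetections shape introduced above is the new pipeline contract: the detector receives an async iterable of video frames and yields a result paired with the frame it came from, always closing the upstream generator when iteration ends. A minimal self-contained sketch of that contract, with illustrative names:

import asyncio

async def fake_frames():
    # stand-in for the RPC-connected video frame generator
    for i in range(3):
        yield {'frame': i}

async def generate_detections(video_frames):
    try:
        async for frame in video_frames:
            yield {'detected': {'detections': []}, 'videoFrame': frame}
    finally:
        # mirror the plugin: close the upstream generator even if the
        # consumer abandons iteration early
        await video_frames.aclose()

async def main():
    async for result in generate_detections(fake_frames()):
        print(result)

asyncio.run(main())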

View File

@@ -1 +0,0 @@
../../tensorflow-lite/src/pipeline

View File

@@ -3,9 +3,6 @@ numpy>=1.16.2
# pillow for anything not intel linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
PyGObject>=3.30.4; sys_platform != 'win32'
imutils>=0.5.0
# not available on armhf
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
# not available on armhf
opencv-python; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
opencv-python; sys_platform != 'linux' or platform_machine == 'x86_64'

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/pam-diff",
"version": "0.0.17",
"version": "0.0.18",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/pam-diff",
"version": "0.0.17",
"version": "0.0.18",
"hasInstallScript": true,
"dependencies": {
"@types/node": "^16.6.1",

View File

@@ -43,5 +43,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.0.17"
"version": "0.0.18"
}

View File

@@ -231,7 +231,7 @@ ENDHDR
detections.push(
{
className: 'motion',
score: trigger.percent / 100,
score: 1,
boundingBox: [blob.minX, blob.minY, blob.maxX - blob.minX, blob.maxY - blob.minY],
}
)
@@ -241,7 +241,7 @@ ENDHDR
detections.push(
{
className: 'motion',
score: trigger.percent / 100,
score: 1,
}
)
}

View File

@@ -10,7 +10,7 @@
"port": 10081,
"request": "attach",
"skipFiles": [
"**/plugin-remote-worker.*",
"**/plugin-console.*",
"<node_internals>/**"
],
"autoAttachChildProcesses": true,

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/prebuffer-mixin",
"version": "0.9.77",
"version": "0.9.79",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/prebuffer-mixin",
"version": "0.9.77",
"version": "0.9.79",
"license": "Apache-2.0",
"dependencies": {
"@scrypted/common": "file:../../common",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/prebuffer-mixin",
"version": "0.9.77",
"version": "0.9.79",
"description": "Video Stream Rebroadcast, Prebuffer, and Management Plugin for Scrypted.",
"author": "Scrypted",
"license": "Apache-2.0",

View File

@@ -1,5 +1,7 @@
import { Deferred } from "@scrypted/common/src/deferred";
import { Headers, RtspServer } from "@scrypted/common/src/rtsp-server";
import fs from 'fs';
import { format } from "path";
import { Duplex } from "stream";
// non standard extension that dumps the rtp payload to a file.
@@ -28,17 +30,49 @@ export class FileRtspServer extends RtspServer {
ws?.end(() => ws?.destroy());
}
write(url: string, requestHeaders: Headers) {
this.cleanup();
this.segmentBytesWritten = 0;
async write(url: string, requestHeaders: Headers) {
const file = requestHeaders['x-scrypted-rtsp-file'];
if (!file)
return this.respond(400, 'Bad Request', requestHeaders, {});
const truncate = requestHeaders['x-scrypted-rtsp-file-truncate'];
// this.writeConsole?.log('RTSP WRITE file', file);
this.writeStream = fs.createWriteStream(file);
// truncation preparation must happen before cleanup.
let truncateWriteStream: fs.WriteStream;
if (truncate) {
try {
const d = new Deferred<number>();
fs.open(truncate, 'w', (e, fd) => {
if (e)
d.reject(e);
else
d.resolve(fd);
});
const fd = await d.promise;
try {
await fs.promises.rename(truncate, file);
truncateWriteStream = fs.createWriteStream(undefined, {
fd,
})
// this.writeConsole?.log('truncating', truncate);
}
catch (e) {
throw e;
}
}
catch (e) {
this.writeConsole?.error('RTSP WRITE error during truncate file', truncate, e);
}
}
// everything after this point must be sync due to cleanup potentially causing dangling state.
this.cleanup();
this.segmentBytesWritten = 0;
this.writeStream = truncateWriteStream || fs.createWriteStream(file);
this.writeStream.on('error', e => {
this.writeConsole?.error('RTSP WRITE error', e);
});
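The truncation handling above leans on a POSIX property: an open file descriptor remains valid across a rename of the underlying file. The server opens the old recording with truncating flags, renames it into place as the new segment, and keeps writing through the already-open descriptor. The same trick in Python, as an illustration with placeholder paths:

import os

def truncate_and_rename(old_path: str, new_path: str):
    # open with truncation; the descriptor survives the rename below
    fd = os.open(old_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
    try:
        os.rename(old_path, new_path)
    except OSError:
        os.close(fd)
        raise
    # wrap the descriptor for buffered writes to the renamed file
    return os.fdopen(fd, 'wb')

# usage sketch:
# f = truncate_and_rename('old-segment.mp4', 'current-segment.mp4')
# f.write(b'...')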

View File

@@ -3,7 +3,7 @@ import { AutoenableMixinProvider } from '@scrypted/common/src/autoenable-mixin-p
import { getDebugModeH264EncoderArgs, getH264EncoderArgs } from '@scrypted/common/src/ffmpeg-hardware-acceleration';
import { addVideoFilterArguments } from '@scrypted/common/src/ffmpeg-helpers';
import { handleRebroadcasterClient, ParserOptions, ParserSession, startParserSession } from '@scrypted/common/src/ffmpeg-rebroadcast';
import { closeQuiet, listenZeroSingleClient } from '@scrypted/common/src/listen-cluster';
import { closeQuiet, listenZeroSingleClient, ListenZeroSingleClientTimeoutError } from '@scrypted/common/src/listen-cluster';
import { readLength } from '@scrypted/common/src/read-stream';
import { createRtspParser, findH264NaluType, getNaluTypes, H264_NAL_TYPE_FU_B, H264_NAL_TYPE_IDR, H264_NAL_TYPE_MTAP16, H264_NAL_TYPE_MTAP32, H264_NAL_TYPE_RESERVED0, H264_NAL_TYPE_RESERVED30, H264_NAL_TYPE_RESERVED31, H264_NAL_TYPE_SEI, H264_NAL_TYPE_STAP_B, listenSingleRtspClient, RtspServer, RtspTrack } from '@scrypted/common/src/rtsp-server';
import { addTrackControls, parseSdp } from '@scrypted/common/src/sdp-utils';
@@ -946,23 +946,35 @@ class PrebufferSession {
const { isActiveClient, container, session, socketPromise, requestedPrebuffer } = options;
this.console.log('sending prebuffer', requestedPrebuffer);
// in case the client never connects, do an inactivity check.
socketPromise.catch(() => this.inactivityCheck(session, false));
socketPromise.then(socket => {
let socket: Duplex;
try {
socket = await socketPromise;
}
catch (e) {
// in case the client never connects, do an inactivity check.
this.inactivityCheck(session, false);
if (e instanceof ListenZeroSingleClientTimeoutError)
this.console.warn('client connection timed out');
else
this.console.error('client connection error', e);
return;
}
if (isActiveClient) {
this.activeClients++;
this.printActiveClients();
}
socket.once('close', () => {
if (isActiveClient) {
this.activeClients++;
this.activeClients--;
this.printActiveClients();
}
socket.once('close', () => {
if (isActiveClient) {
this.activeClients--;
this.printActiveClients();
}
this.inactivityCheck(session, isActiveClient);
})
this.inactivityCheck(session, isActiveClient);
});
handleRebroadcasterClient(socketPromise, {
handleRebroadcasterClient(socket, {
// console: this.console,
connect: (connection) => {
const now = Date.now();
@@ -1138,7 +1150,7 @@ class PrebufferSession {
}
// server.console = this.console;
await server.handlePlayback();
server.handleTeardown().finally(() => server.client.destroy());
server.handleTeardown().catch(() => {}).finally(() => server.client.destroy());
for (const track of Object.values(server.setupTracks)) {
if (track.protocol === 'udp') {
serverPortMap.set(track.codec, track);

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/python-codecs",
"version": "0.1.18",
"version": "0.1.25",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@scrypted/python-codecs",
"version": "0.1.18",
"version": "0.1.25",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/python-codecs",
"version": "0.1.18",
"version": "0.1.25",
"description": "Python Codecs for Scrypted",
"keywords": [
"scrypted",

View File

@@ -34,7 +34,7 @@ async def generateVideoFramesGstreamer(mediaObject: scrypted_sdk.MediaObject, op
else:
raise Exception('unknown container %s' % container)
elif videosrc.startswith('rtsp'):
videosrc = 'rtspsrc buffer-mode=0 location=%s protocols=tcp latency=0 is-live=false' % videosrc
videosrc = 'rtspsrc buffer-mode=0 location=%s protocols=tcp latency=0' % videosrc
if videoCodec == 'h264':
videosrc += ' ! rtph264depay ! h264parse'

View File

@@ -27,6 +27,8 @@ async def generateVideoFramesLibav(mediaObject: scrypted_sdk.MediaObject, option
# stream.codec_context.options['-analyzeduration'] = '0'
# stream.codec_context.options['-probesize'] = '500000'
gray = options and options.get('format') == 'gray'
start = 0
try:
for idx, frame in enumerate(container.decode(stream)):
@@ -39,7 +41,12 @@ async def generateVideoFramesLibav(mediaObject: scrypted_sdk.MediaObject, option
continue
# print(frame)
if vipsimage.pyvips:
vips = vipsimage.pyvips.Image.new_from_array(frame.to_ndarray(format='rgb24'))
if gray and frame.format.name.startswith('yuv') and frame.planes and len(frame.planes):
vips = vipsimage.new_from_memory(memoryview(frame.planes[0]), frame.width, frame.height, 1)
elif gray:
vips = vipsimage.pyvips.Image.new_from_array(frame.to_ndarray(format='gray'))
else:
vips = vipsimage.pyvips.Image.new_from_array(frame.to_ndarray(format='rgb24'))
vipsImage = vipsimage.VipsImage(vips)
try:
mo = await vipsimage.createVipsMediaObject(vipsImage)
@@ -48,7 +55,16 @@ async def generateVideoFramesLibav(mediaObject: scrypted_sdk.MediaObject, option
vipsImage.vipsImage = None
vips.invalidate()
else:
pil = frame.to_image()
if gray and frame.format.name.startswith('yuv') and frame.planes and len(frame.planes):
pil = pilimage.new_from_memory(memoryview(frame.planes[0]), frame.width, frame.height, 1)
elif gray:
rgb = frame.to_image()
try:
pil = rgb.convert('L')
finally:
rgb.close()
else:
pil = frame.to_image()
pilImage = pilimage.PILImage(pil)
try:
mo = await pilimage.createPILMediaObject(pilImage)
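The grayscale fast path above exploits YUV memory layout: for yuv420p and similar formats, plane 0 is already a full-resolution 8-bit luma image, so a gray frame can be wrapped directly from that plane without a colorspace conversion. Roughly, with PyAV and numpy (a sketch that ignores line-stride padding):

import numpy as np

def luma_as_gray(frame):
    """Wrap plane 0 of a yuv* PyAV frame as a grayscale array; sketch only."""
    if frame.format.name.startswith('yuv') and frame.planes:
        # plane 0 of yuv420p is the full-resolution Y (luma) channel;
        # assumes the plane has no per-line padding
        buf = memoryview(frame.planes[0])
        return np.frombuffer(buf, dtype=np.uint8,
                             count=frame.width * frame.height).reshape(
                                 frame.height, frame.width)
    # fallback: let libav do the conversion
    return frame.to_ndarray(format='gray')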

View File

@@ -21,19 +21,26 @@ class PILImage(scrypted_sdk.VideoFrame):
if not options or not options.get('format', None):
def format():
bytesArray = io.BytesIO()
pilImage.pilImage.save(bytesArray, format='JPEG')
return bytesArray.getvalue()
return pilImage.pilImage.tobytes()
return await to_thread(format)
elif options['format'] == 'rgb':
def format():
rgb = pilImage.pilImage
if rgb.format == 'RGBA':
rgb = rgb.convert('RGB')
return rgb.tobytes()
rgbx = pilImage.pilImage
if rgbx.mode != 'RGBA':
return rgbx.tobytes()
rgb = rgbx.convert('RGB')
try:
return rgb.tobytes()
finally:
rgb.close()
return await to_thread(format)
return await to_thread(lambda: pilImage.pilImage.write_to_buffer('.' + options['format']))
def save():
bytesArray = io.BytesIO()
pilImage.pilImage.save(bytesArray, format=options['format'])
return bytesArray.getvalue()
return await to_thread(lambda: save())
async def toPILImage(self, options: scrypted_sdk.ImageOptions = None):
return await to_thread(lambda: toPILImage(self, options))
@@ -66,7 +73,7 @@ def toPILImage(pilImageWrapper: PILImage, options: scrypted_sdk.ImageOptions = N
if not width:
width = pilImage.width * yscale
pilImage = pilImage.resize((width, height), resample=Image.Resampling.BILINEAR)
pilImage = pilImage.resize((width, height), resample=Image.BILINEAR)
return PILImage(pilImage)
@@ -89,6 +96,7 @@ class ImageReader(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.BufferConverter)
async def convert(self, data: Any, fromMimeType: str, toMimeType: str, options: scrypted_sdk.MediaObjectOptions = None) -> Any:
pil = Image.open(io.BytesIO(data))
pil.load()
return await createPILMediaObject(PILImage(pil))
class ImageWriter(scrypted_sdk.ScryptedDeviceBase, scrypted_sdk.BufferConverter):

View File

@@ -1,7 +1,11 @@
# plugin
# gobject introspection for gstreamer.
PyGObject>=3.30.4; sys_platform != 'win32'
# libav doesn't work on armv7
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
# pyvips is not available on windows, and is preinstalled as part of the installer scripts on
# mac and linux.
pyvips; sys_platform != 'win32'
# in case pyvips fails to load, use a pillow fallback.

View File

@@ -1,5 +1,4 @@
import asyncio
from typing import Any
import concurrent.futures
# vips is already multithreaded, but needs to be kicked off the python asyncio thread.

View File

@@ -6,7 +6,6 @@ try:
except:
Image = None
pyvips = None
pass
from thread import to_thread
class VipsImage(scrypted_sdk.VideoFrame):

View File

@@ -1,118 +1,21 @@
from __future__ import annotations
from asyncio.events import AbstractEventLoop, TimerHandle
from asyncio.futures import Future
from typing import Any, Mapping, Tuple
from typing_extensions import TypedDict
from pipeline import GstPipeline, GstPipelineBase, create_pipeline_sink, safe_set_result
import scrypted_sdk
import json
import asyncio
import time
import os
import binascii
from urllib.parse import urlparse
import threading
from pipeline import run_pipeline
import platform
from .corohelper import run_coro_threadsafe
from PIL import Image
import math
import io
from typing import Any, Tuple
Gst = None
try:
from gi.repository import Gst
except:
pass
av = None
try:
import av
av.logging.set_level(av.logging.PANIC)
except:
pass
from scrypted_sdk.types import ObjectDetectionGeneratorSession, ObjectDetectionModel, Setting, FFmpegInput, MediaObject, ObjectDetection, ObjectDetectionCallbacks, ObjectDetectionSession, ObjectsDetected, ScryptedInterface, ScryptedMimeTypes
def optional_chain(root, *keys):
result = root
for k in keys:
if isinstance(result, dict):
result = result.get(k, None)
else:
result = getattr(result, k, None)
if result is None:
break
return result
class DetectionSession:
id: str
timerHandle: TimerHandle
future: Future
loop: AbstractEventLoop
settings: Any
running: bool
plugin: DetectPlugin
callbacks: ObjectDetectionCallbacks
user_callback: Any
def __init__(self) -> None:
self.timerHandle = None
self.future = Future()
self.running = False
self.mutex = threading.Lock()
self.last_sample = time.time()
self.user_callback = None
def clearTimeoutLocked(self):
if self.timerHandle:
self.timerHandle.cancel()
self.timerHandle = None
def clearTimeout(self):
with self.mutex:
self.clearTimeoutLocked()
def timedOut(self):
self.plugin.end_session(self)
def setTimeout(self, duration: float):
with self.mutex:
self.clearTimeoutLocked()
self.timerHandle = self.loop.call_later(
duration, lambda: self.timedOut())
class DetectionSink(TypedDict):
pipeline: str
input_size: Tuple[float, float]
import scrypted_sdk
from scrypted_sdk.types import (MediaObject, ObjectDetection,
ObjectDetectionCallbacks,
ObjectDetectionGeneratorSession,
ObjectDetectionModel, ObjectDetectionSession,
ObjectsDetected, ScryptedMimeTypes, Setting)
class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
self.detection_sessions: Mapping[str, DetectionSession] = {}
self.session_mutex = threading.Lock()
self.crop = False
self.loop = asyncio.get_event_loop()
async def getSettings(self) -> list[Setting]:
activeSessions: Setting = {
'key': 'activeSessions',
'readonly': True,
'title': 'Active Detection Sessions',
'value': len(self.detection_sessions),
}
return [
activeSessions
]
async def putSetting(self, key: str, value: scrypted_sdk.SettingValue) -> None:
pass
def getClasses(self) -> list[str]:
pass
@@ -138,165 +41,21 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
'settings': [],
}
decoderSetting: Setting = {
'title': "Decoder",
'description': "The tool used to decode the stream. This may be libav or a gstreamer element.",
'combobox': True,
'value': 'Default',
'placeholder': 'Default',
'key': 'decoder',
'subgroup': 'Advanced',
'choices': [
'Default',
'libav',
'decodebin',
'vtdec_hw',
'nvh264dec',
'vaapih264dec',
],
}
d['settings'] += self.getModelSettings(settings)
d['settings'].append(decoderSetting)
return d
async def detection_event(self, detection_session: DetectionSession, detection_result: ObjectsDetected, redetect: Any = None, mediaObject = None):
if not detection_session.running and detection_result.get('running'):
return
detection_result['timestamp'] = int(time.time() * 1000)
if detection_session.callbacks:
if detection_session.running:
return await detection_session.callbacks.onDetection(detection_result, redetect, mediaObject)
else:
await detection_session.callbacks.onDetectionEnded(detection_result)
else:
# legacy path, nuke this pattern in opencv, pam diff, and full tensorflow.
detection_result['detectionId'] = detection_session.id
await self.onDeviceEvent(ScryptedInterface.ObjectDetection.value, detection_result)
def end_session(self, detection_session: DetectionSession):
print('detection ended', detection_session.id)
detection_session.clearTimeout()
# leave detection_session.running as True to avoid race conditions.
# the removal from detection_sessions will restart it.
safe_set_result(detection_session.loop, detection_session.future)
with self.session_mutex:
self.detection_sessions.pop(detection_session.id, None)
detection_result: ObjectsDetected = {}
detection_result['running'] = False
asyncio.run_coroutine_threadsafe(self.detection_event(detection_session, detection_result), loop=detection_session.loop)
def create_detection_result_status(self, detection_id: str, running: bool):
detection_result: ObjectsDetected = {}
detection_result['detectionId'] = detection_id
detection_result['running'] = running
detection_result['timestamp'] = int(time.time() * 1000)
return detection_result
def run_detection_jpeg(self, detection_session: DetectionSession, image_bytes: bytes, settings: Any) -> ObjectsDetected:
pass
def get_detection_input_size(self, src_size):
pass
def create_detection_session(self):
return DetectionSession()
def run_detection_gstsample(self, detection_session: DetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: ObjectDetectionSession) -> ObjectsDetected:
pass
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: DetectionSession) -> ObjectsDetected:
pass
async def run_detection_avframe(self, detection_session: DetectionSession, avframe, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
pil: Image.Image = avframe.to_image()
return await self.run_detection_image(detection_session, pil, settings, src_size, convert_to_src_size)
async def run_detection_image(self, detection_session: DetectionSession, image: Image.Image, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Any]:
pass
def run_detection_crop(self, detection_session: DetectionSession, sample: Any, settings: Any, src_size, convert_to_src_size, bounding_box: Tuple[float, float, float, float]) -> ObjectsDetected:
print("not implemented")
pass
def ensure_session(self, mediaObjectMimeType: str, session: ObjectDetectionSession) -> Tuple[bool, DetectionSession, ObjectsDetected]:
settings = None
duration = None
detection_id = None
detection_session = None
if session:
detection_id = session.get('detectionId', None)
duration = session.get('duration', None)
settings = session.get('settings', None)
is_image = mediaObjectMimeType and mediaObjectMimeType.startswith(
'image/')
ending = False
new_session = False
with self.session_mutex:
if not is_image and not detection_id:
detection_id = binascii.b2a_hex(os.urandom(15)).decode('utf8')
if detection_id:
detection_session = self.detection_sessions.get(
detection_id, None)
if duration == None and not is_image:
ending = True
elif detection_id and not detection_session:
if not mediaObjectMimeType:
return (False, None, self.create_detection_result_status(detection_id, False))
new_session = True
detection_session = self.create_detection_session()
detection_session.plugin = self
detection_session.id = detection_id
detection_session.settings = settings
loop = asyncio.get_event_loop()
detection_session.loop = loop
self.detection_sessions[detection_id] = detection_session
detection_session.future.add_done_callback(
lambda _: self.end_session(detection_session))
if not ending and detection_session and time.time() - detection_session.last_sample > 30 and not mediaObjectMimeType:
print('detection session has not received a sample in 30 seconds, terminating',
detection_session.id)
ending = True
if ending:
if detection_session:
self.end_session(detection_session)
return (False, None, self.create_detection_result_status(detection_id, False))
if is_image:
return (False, detection_session, None)
detection_session.setTimeout(duration / 1000)
if settings != None:
detection_session.settings = settings
if not new_session:
print("existing session", detection_session.id)
return (False, detection_session, self.create_detection_result_status(detection_id, detection_session.running))
return (True, detection_session, None)
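# return shape is (create, session, status): create=True tells the caller to
# start a new decode pipeline; otherwise the status result, when present, is
# returned to the requester as-is.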
async def generateObjectDetections(self, videoFrames: Any, session: ObjectDetectionGeneratorSession = None) -> Any:
try:
videoFrames = await scrypted_sdk.sdk.connectRPCObject(videoFrames)
detection_session = self.create_detection_session()
detection_session.plugin = self
detection_session.settings = session and session.get('settings')
async for videoFrame in videoFrames:
detected = await self.run_detection_videoframe(videoFrame, detection_session)
detected = await self.run_detection_videoframe(videoFrame, session)
yield {
'__json_copy_serialize_children': True,
'detected': detected,
@@ -309,261 +68,13 @@ class DetectPlugin(scrypted_sdk.ScryptedDeviceBase, ObjectDetection):
pass
async def detectObjects(self, mediaObject: MediaObject, session: ObjectDetectionSession = None, callbacks: ObjectDetectionCallbacks = None) -> ObjectsDetected:
is_image = mediaObject and (mediaObject.mimeType.startswith('image/') or mediaObject.mimeType.endswith('/x-raw-image'))
settings = None
duration = None
if session:
duration = session.get('duration', None)
settings = session.get('settings', None)
vf: scrypted_sdk.VideoFrame
if mediaObject and mediaObject.mimeType == ScryptedMimeTypes.Image.value:
vf: scrypted_sdk.VideoFrame = mediaObject
return await self.run_detection_videoframe(vf, settings)
vf = mediaObject
else:
vf = await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, ScryptedMimeTypes.Image.value)
create, detection_session, objects_detected = self.ensure_session(
mediaObject and mediaObject.mimeType, session)
if detection_session:
detection_session.callbacks = callbacks
if is_image:
stream = io.BytesIO(bytes(await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, 'image/jpeg')))
image = Image.open(stream)
if detection_session:
if not detection_session.user_callback:
detection_session.user_callback = self.create_user_callback(self.run_detection_image, detection_session, duration)
def convert_to_src_size(point, normalize = False):
x, y = point
return (int(math.ceil(x)), int(math.ceil(y)), True)
detection_session.running = True
try:
return await detection_session.user_callback(image, image.size, convert_to_src_size)
finally:
detection_session.running = False
else:
return await self.run_detection_jpeg(detection_session, bytes(await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, 'image/jpeg')), settings)
if not create:
# a detection session may have been created, but not started
# if the initial request was for an image.
# however, attached sessions should be unchoked, as the pipeline
# is not managed here.
if not detection_session or detection_session.running or not mediaObject:
return objects_detected
detection_id = detection_session.id
detection_session.running = True
print('detection starting', detection_id)
b = await scrypted_sdk.mediaManager.convertMediaObjectToBuffer(mediaObject, ScryptedMimeTypes.FFmpegInput.value)
s = b.decode('utf8')
j: FFmpegInput = json.loads(s)
container = j.get('container', None)
videosrc = j['url']
videoCodec = optional_chain(j, 'mediaStreamOptions', 'video', 'codec')
decoder = settings and settings.get('decoder')
if decoder == 'Default':
decoder = None
if decoder == 'libav' and not av:
decoder = None
elif decoder != 'libav' and not Gst:
decoder = None
if not decoder:
if Gst:
if videoCodec == 'h264':
# hw acceleration is "safe" to use on mac, but not
# on other hosts where it may crash.
# defaults must be safe.
if platform.system() == 'Darwin':
decoder = 'vtdec_hw'
else:
decoder = 'avdec_h264'
else:
# decodebin may pick a hardware accelerated decoder, which isn't ideal
# so use a known software decoder for h264 and decodebin for anything else.
decoder = 'decodebin'
elif av:
decoder = 'libav'
if decoder == 'libav':
user_callback = self.create_user_callback(self.run_detection_avframe, detection_session, duration)
async def inference_loop():
options = {
'analyzeduration': '0',
'probesize': '500000',
'reorder_queue_size': '0',
}
container = av.open(videosrc, options = options)
stream = container.streams.video[0]
start = 0
for idx, frame in enumerate(container.decode(stream)):
if detection_session.future.done():
container.close()
break
now = time.time()
if not start:
start = now
elapsed = now - start
if (frame.time or 0) < elapsed - 0.500:
# print('too slow, skipping frame')
continue
# print(frame)
size = (frame.width, frame.height)
def convert_to_src_size(point, normalize = False):
x, y = point
return (int(math.ceil(x)), int(math.ceil(y)), True)
await user_callback(frame, size, convert_to_src_size)
def thread_main():
loop = asyncio.new_event_loop()
loop.run_until_complete(inference_loop())
thread = threading.Thread(target=thread_main)
thread.start()
return self.create_detection_result_status(detection_id, True)
if not Gst:
raise Exception('Gstreamer is unavailable')
if videosrc.startswith('tcp://'):
parsed_url = urlparse(videosrc)
videosrc = 'tcpclientsrc port=%s host=%s' % (
parsed_url.port, parsed_url.hostname)
if container == 'mpegts':
videosrc += ' ! tsdemux'
elif container == 'sdp':
videosrc += ' ! sdpdemux'
else:
raise Exception('unknown container %s' % container)
elif videosrc.startswith('rtsp'):
videosrc = 'rtspsrc buffer-mode=0 location=%s protocols=tcp latency=0 is-live=false' % videosrc
if videoCodec == 'h264':
videosrc += ' ! rtph264depay ! h264parse'
videosrc += " ! %s" % decoder
width = optional_chain(j, 'mediaStreamOptions',
'video', 'width') or 1920
height = optional_chain(j, 'mediaStreamOptions',
'video', 'height') or 1080
src_size = (width, height)
self.run_pipeline(detection_session, duration, src_size, videosrc)
return self.create_detection_result_status(detection_id, True)
return await self.run_detection_videoframe(vf, session)
def get_pixel_format(self):
return 'RGB'
def create_pipeline_sink(self, src_size) -> DetectionSink:
inference_size = self.get_detection_input_size(src_size)
ret: DetectionSink = {}
ret['input_size'] = inference_size
ret['pipeline'] = create_pipeline_sink(
type(self).__name__, inference_size, self.get_pixel_format())
return ret
async def detection_event_notified(self, settings: Any):
pass
async def createMedia(self, data: Any) -> MediaObject:
pass
def invalidateMedia(self, detection_session: DetectionSession, data: Any):
pass
def create_user_callback(self, run_detection: Any, detection_session: DetectionSession, duration: float):
first_frame = True
current_data = None
current_src_size = None
current_convert_to_src_size = None
async def redetect(boundingBox: Tuple[float, float, float, float]):
nonlocal current_data
nonlocal current_src_size
nonlocal current_convert_to_src_size
if not current_data:
raise Exception('no sample')
detection_result = await self.run_detection_crop(
detection_session, current_data, detection_session.settings, current_src_size, current_convert_to_src_size, boundingBox)
return detection_result['detections']
async def user_callback(sample, src_size, convert_to_src_size):
try:
detection_session.last_sample = time.time()
nonlocal first_frame
if first_frame:
first_frame = False
print("first frame received", detection_session.id)
detection_result, data = await run_detection(
detection_session, sample, detection_session.settings, src_size, convert_to_src_size)
if detection_result:
detection_result['running'] = True
mo = None
retain = False
def maybeInvalidate():
if not retain:
self.invalidateMedia(detection_session, data)
# else:
# print('retaining')
mo = await self.createMedia(data)
try:
nonlocal current_data
nonlocal current_src_size
nonlocal current_convert_to_src_size
try:
current_data = data
current_src_size = src_size
current_convert_to_src_size = convert_to_src_size
retain = await run_coro_threadsafe(self.detection_event(detection_session, detection_result, redetect, mo), other_loop=detection_session.loop)
finally:
current_data = None
current_convert_to_src_size = None
current_src_size = None
maybeInvalidate()
except Exception as e:
print(e)
self.invalidateMedia(detection_session, data)
# asyncio.run_coroutine_threadsafe(, loop = self.loop).result()
await self.detection_event_notified(detection_session.settings)
if not detection_session or duration == None:
safe_set_result(detection_session.loop,
detection_session.future)
return detection_result
finally:
pass
return user_callback
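# note: current_data is pinned only while detection_event runs, so a
# redetect() issued from within the callback can re-run the model on a crop
# of the same frame; the sample is then invalidated unless the callback
# returned a truthy retain value.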
def run_pipeline(self, detection_session: DetectionSession, duration, src_size, video_input):
inference_size = self.get_detection_input_size(src_size)
pipeline = run_pipeline(detection_session.loop, detection_session.future, self.create_user_callback(self.run_detection_gstsample, detection_session, duration),
appsink_name=type(self).__name__,
appsink_size=inference_size,
video_input=video_input,
pixel_format=self.get_pixel_format(),
crop=self.crop,
)
task = pipeline.run()
asyncio.ensure_future(task)

View File

@@ -1,315 +0,0 @@
from asyncio.events import AbstractEventLoop
from asyncio.futures import Future
import threading
from .safe_set_result import safe_set_result
import math
import asyncio
try:
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
from gi.repository import GObject, Gst
GObject.threads_init()
Gst.init(None)
except:
pass
class GstPipelineBase:
def __init__(self, loop: AbstractEventLoop, finished: Future) -> None:
self.loop = loop
self.finished = finished
self.gst = None
def attach_launch(self, gst):
self.gst = gst
def parse_launch(self, pipeline: str):
self.attach_launch(Gst.parse_launch(pipeline))
# Set up a pipeline bus watch to catch errors.
self.bus = self.gst.get_bus()
self.watchId = self.bus.connect('message', self.on_bus_message)
self.bus.add_signal_watch()
def on_bus_message(self, bus, message):
# seeing the following error on pi 32 bit
# OverflowError: Python int too large to convert to C long
t = str(message.type)
if t == str(Gst.MessageType.EOS):
safe_set_result(self.loop, self.finished)
elif t == str(Gst.MessageType.WARNING):
err, debug = message.parse_warning()
print('Warning: %s: %s\n' % (err, debug))
elif t == str(Gst.MessageType.ERROR):
err, debug = message.parse_error()
print('Error: %s: %s\n' % (err, debug))
safe_set_result(self.loop, self.finished)
return True
async def run_attached(self):
try:
await self.finished
except:
pass
async def attach(self):
pass
async def detach(self):
pass
async def run(self):
await self.attach()
# Run pipeline.
self.gst.set_state(Gst.State.PLAYING)
try:
await self.run_attached()
finally:
# Clean up.
self.bus.remove_signal_watch()
self.bus.disconnect(self.watchId)
self.gst.set_state(Gst.State.NULL)
self.bus = None
self.watchId = None
self.gst = None
await self.detach()
class GstPipeline(GstPipelineBase):
def __init__(self, loop: AbstractEventLoop, finished: Future, appsink_name: str, user_callback, crop=False):
super().__init__(loop, finished)
self.appsink_name = appsink_name
self.user_callback = user_callback
self.running = False
self.gstsample = None
self.sink_size = None
self.src_size = None
self.dst_size = None
self.pad_size = None
self.scale_size = None
self.crop = crop
self.condition = None
def attach_launch(self, gst):
super().attach_launch(gst)
appsink = self.gst.get_by_name(self.appsink_name)
appsink.connect('new-preroll', self.on_new_sample, True)
appsink.connect('new-sample', self.on_new_sample, False)
async def attach(self):
# Start inference worker.
self.running = True
worker = threading.Thread(target=self.inference_main)
worker.start()
while not self.condition:
await asyncio.sleep(.1)
async def detach(self):
async def notifier():
async with self.condition:
self.condition.notify_all()
self.running = False
asyncio.run_coroutine_threadsafe(notifier(), loop = self.selfLoop)
def on_new_sample(self, sink, preroll):
sample = sink.emit('pull-preroll' if preroll else 'pull-sample')
if not self.sink_size:
s = sample.get_caps().get_structure(0)
self.sink_size = (s.get_value('width'), s.get_value('height'))
self.gstsample = sample
async def notifier():
async with self.condition:
self.condition.notify_all()
try:
if self.running:
asyncio.run_coroutine_threadsafe(notifier(), loop = self.selfLoop).result()
except Exception as e:
# now what?
# print('sample error')
# print(e)
pass
return Gst.FlowReturn.OK
def get_src_size(self):
if not self.src_size:
videoconvert = self.gst.get_by_name('videoconvert')
structure = videoconvert.srcpads[0].get_current_caps(
).get_structure(0)
_, w = structure.get_int('width')
_, h = structure.get_int('height')
self.src_size = (w, h)
videoscale = self.gst.get_by_name('videoscale')
structure = videoscale.srcpads[0].get_current_caps(
).get_structure(0)
_, w = structure.get_int('width')
_, h = structure.get_int('height')
self.dst_size = (w, h)
appsink = self.gst.get_by_name(self.appsink_name)
structure = appsink.sinkpads[0].get_current_caps().get_structure(0)
_, w = structure.get_int('width')
_, h = structure.get_int('height')
self.dst_size = (w, h)
# the dimension with the higher scale value got cropped or boxed.
# use the other dimension to figure out the crop/box amount.
scales = (self.dst_size[0] / self.src_size[0],
self.dst_size[1] / self.src_size[1])
if self.crop:
scale = max(scales[0], scales[1])
else:
scale = min(scales[0], scales[1])
self.scale_size = scale
dx = self.src_size[0] * scale
dy = self.src_size[1] * scale
px = math.ceil((self.dst_size[0] - dx) / 2)
py = math.ceil((self.dst_size[1] - dy) / 2)
self.pad_size = (px, py)
return self.src_size
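# worked example (crop=False, letterboxed): a 1920x1080 source scaled into a
# 320x320 appsink gives scales=(0.167, 0.296), scale=min=0.167, a scaled
# frame of 320x180, and pad_size=(0, 70) for the letterbox bars.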
def convert_to_src_size(self, point, normalize=False):
valid = True
px, py = self.pad_size
x, y = point
if normalize:
x = max(0, x)
x = min(x, self.src_size[0] - 1)
y = max(0, y)
y = min(y, self.src_size[1] - 1)
x = (x - px) / self.scale_size
if x < 0:
x = 0
valid = False
if x >= self.src_size[0]:
x = self.src_size[0] - 1
valid = False
y = (y - py) / self.scale_size
if y < 0:
y = 0
valid = False
if y >= self.src_size[1]:
y = self.src_size[1] - 1
valid = False
return (int(math.ceil(x)), int(math.ceil(y)), valid)
def inference_main(self):
loop = asyncio.new_event_loop()
self.selfLoop = loop
try:
loop.run_until_complete(self.inference_loop())
finally:
loop.close()
async def inference_loop(self):
self.condition = asyncio.Condition()
while self.running:
async with self.condition:
while not self.gstsample and self.running:
await self.condition.wait()
if not self.running:
return
gstsample = self.gstsample
self.gstsample = None
try:
await self.user_callback(gstsample, self.get_src_size(
), lambda p, normalize=False: self.convert_to_src_size(p, normalize))
except Exception as e:
print("callback failure")
print(e)
raise
def get_dev_board_model():
try:
model = open('/sys/firmware/devicetree/base/model').read().lower()
if 'mx8mq' in model:
return 'mx8mq'
if 'mt8167' in model:
return 'mt8167'
except:
pass
return None
def create_pipeline_sink(
appsink_name,
appsink_size,
pixel_format,
crop=False):
SINK_ELEMENT = 'appsink name={appsink_name} emit-signals=true max-buffers=-1 drop=true sync=false'.format(
appsink_name=appsink_name)
(width, height) = appsink_size
SINK_CAPS = 'video/x-raw,format={pixel_format}'
if width and height:
SINK_CAPS += ',width={width},height={height},pixel-aspect-ratio=1/1'
sink_caps = SINK_CAPS.format(
width=width, height=height, pixel_format=pixel_format)
pipeline = " {sink_caps} ! {sink_element}".format(
sink_caps=sink_caps,
sink_element=SINK_ELEMENT)
return pipeline
def create_pipeline(
appsink_name,
appsink_size,
video_input,
pixel_format,
crop=False,
parse_only=False):
if parse_only:
sink = 'appsink name={appsink_name} emit-signals=true sync=false'.format(
appsink_name=appsink_name)
PIPELINE = """ {video_input}
! {sink}
"""
else:
sink = create_pipeline_sink(
appsink_name, appsink_size, pixel_format, crop=crop)
if crop:
PIPELINE = """ {video_input} ! queue leaky=downstream max-size-buffers=0 ! videoconvert name=videoconvert ! aspectratiocrop aspect-ratio=1/1 ! videoscale name=videoscale ! queue leaky=downstream max-size-buffers=0
! {sink}
"""
else:
PIPELINE = """ {video_input} ! queue leaky=downstream max-size-buffers=0 ! videoconvert name=videoconvert ! videoscale name=videoscale ! queue leaky=downstream max-size-buffers=0
! {sink}
"""
pipeline = PIPELINE.format(video_input=video_input, sink=sink)
print('Gstreamer pipeline:\n', pipeline)
return pipeline
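# rough example of the assembled string (hypothetical 320x320 RGB sink,
# rtsp h264 input decoded with avdec_h264):
#   rtspsrc buffer-mode=0 location=rtsp://... protocols=tcp latency=0 is-live=false
#   ! rtph264depay ! h264parse ! avdec_h264
#   ! queue leaky=downstream max-size-buffers=0 ! videoconvert name=videoconvert
#   ! videoscale name=videoscale ! queue leaky=downstream max-size-buffers=0
#   ! video/x-raw,format=RGB,width=320,height=320,pixel-aspect-ratio=1/1
#   ! appsink name=... emit-signals=true max-buffers=-1 drop=true sync=false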
def run_pipeline(loop, finished,
user_callback,
appsink_name,
appsink_size,
video_input,
pixel_format,
crop=False,
parse_only=False):
gst = GstPipeline(loop, finished, appsink_name, user_callback, crop=crop)
pipeline = create_pipeline(
appsink_name, appsink_size, video_input, pixel_format, crop=crop, parse_only=parse_only)
gst.parse_launch(pipeline)
return gst

View File

@@ -1,11 +0,0 @@
from asyncio.futures import Future
from asyncio import AbstractEventLoop
def safe_set_result(loop: AbstractEventLoop, future: Future):
def loop_set_result():
try:
if not future.done():
future.set_result(None)
except:
pass
loop.call_soon_threadsafe(loop_set_result)
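# usage sketch: gstreamer bus handlers and worker threads call this to
# resolve a future owned by the asyncio loop; call_soon_threadsafe keeps
# the actual set_result on the loop's own thread.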

View File

@@ -1,37 +1,40 @@
from __future__ import annotations
from scrypted_sdk.types import ObjectDetectionResult, ObjectsDetected, Setting
import io
from PIL import Image
import re
import scrypted_sdk
from typing import Any, List, Tuple, Mapping
import asyncio
import time
from .rectangle import Rectangle, intersect_area, intersect_rect, to_bounding_box, from_bounding_box, combine_rect
import urllib.request
import concurrent.futures
import os
import re
import urllib.request
from typing import Any, List, Tuple
from detect import DetectionSession, DetectPlugin
import scrypted_sdk
from PIL import Image
from scrypted_sdk.types import (ObjectDetectionResult, ObjectDetectionSession,
ObjectsDetected, Setting)
from .sort_oh import tracker
import numpy as np
import traceback
from detect import DetectPlugin
try:
from gi.repository import Gst
except:
pass
from .rectangle import (Rectangle, combine_rect, from_bounding_box,
intersect_area, intersect_rect, to_bounding_box)
class PredictSession(DetectionSession):
image: Image.Image
tracker: sort_oh.tracker.Sort_OH
# vips is already multithreaded, but needs to be kicked off the python asyncio thread.
toThreadExecutor = concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="image")
def __init__(self, start_time: float) -> None:
super().__init__()
self.image = None
self.processed = 0
self.start_time = start_time
self.tracker = None
async def to_thread(f):
loop = asyncio.get_running_loop()
return await loop.run_in_executor(toThreadExecutor, f)
async def ensureRGBData(data: bytes, size: Tuple[int, int], format: str):
if format != 'rgba':
return Image.frombuffer('RGB', size, data)
def convert():
rgba = Image.frombuffer('RGBA', size, data)
try:
return rgba.convert('RGB')
finally:
rgba.close()
return await to_thread(convert)
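# non-rgba buffers wrap the memory directly on the event loop; rgba frames
# are converted on a worker thread since convert('RGB') is CPU-bound.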
def parse_label_contents(contents: str):
lines = contents.splitlines()
@@ -121,7 +124,6 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
self.toMimeType = scrypted_sdk.ScryptedMimeTypes.MediaObject.value
self.crop = False
self.trackers: Mapping[str, tracker.Sort_OH] = {}
# periodic restart because there seems to be leaks in tflite or coral API.
loop = asyncio.get_event_loop()
@@ -148,42 +150,6 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
mo = await scrypted_sdk.mediaManager.createMediaObject(data, self.fromMimeType)
return mo
def end_session(self, detection_session: PredictSession):
image = detection_session.image
if image:
detection_session.image = None
image.close()
dps = detection_session.processed / (time.time() - detection_session.start_time)
print("Detections per second %s" % dps)
return super().end_session(detection_session)
def invalidateMedia(self, detection_session: PredictSession, data: RawImage):
if not data:
return
image = data.image
data.image = None
if image:
if not detection_session.image:
detection_session.image = image
else:
image.close()
data.jpegMediaObject = None
async def convert(self, data: RawImage, fromMimeType: str, toMimeType: str, options: scrypted_sdk.BufferConvertorOptions = None) -> Any:
mo = data.jpegMediaObject
if not mo:
image = data.image
if not image:
raise Exception('data is no longer valid')
bio = io.BytesIO()
image.save(bio, format='JPEG')
jpegBytes = bio.getvalue()
mo = await scrypted_sdk.mediaManager.createMediaObject(jpegBytes, 'image/jpeg')
data.jpegMediaObject = mo
return mo
def requestRestart(self):
asyncio.ensure_future(scrypted_sdk.deviceManager.requestRestart())
@@ -210,23 +176,7 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
],
}
trackerWindow: Setting = {
'title': 'Tracker Window',
'subgroup': 'Advanced',
'description': 'Internal Setting. Do not change.',
'key': 'trackerWindow',
'value': 3,
'type': 'number',
}
trackerCertainty: Setting = {
'title': 'Tracker Certainty',
'subgroup': 'Advanced',
'description': 'Internal Setting. Do not change.',
'key': 'trackerCertainty',
'value': .2,
'type': 'number',
}
return [allowList, trackerWindow, trackerCertainty]
return [allowList]
def create_detection_result(self, objs: List[Prediction], size, allowList, convert_to_src_size=None) -> ObjectsDetected:
detections: List[ObjectDetectionResult] = []
@@ -250,27 +200,15 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
detection_result['detections'] = []
for detection in detections:
bb = detection['boundingBox']
x, y, valid = convert_to_src_size((bb[0], bb[1]), True)
x2, y2, valid2 = convert_to_src_size(
(bb[0] + bb[2], bb[1] + bb[3]), True)
if not valid or not valid2:
# print("filtering out", detection['className'])
continue
x, y = convert_to_src_size((bb[0], bb[1]))
x2, y2 = convert_to_src_size(
(bb[0] + bb[2], bb[1] + bb[3]))
detection['boundingBox'] = (x, y, x2 - x + 1, y2 - y + 1)
detection_result['detections'].append(detection)
# print(detection_result)
return detection_result
async def run_detection_jpeg(self, detection_session: PredictSession, image_bytes: bytes, settings: Any) -> ObjectsDetected:
stream = io.BytesIO(image_bytes)
image = Image.open(stream)
if image.mode == 'RGBA':
image = image.convert('RGB')
detections, _ = await self.run_detection_image(detection_session, image, settings, image.size)
return detections
def get_detection_input_size(self, src_size):
# signals to the pipeline that any input size is fine.
# previous code resized to the correct size and ran detection that way.
@@ -284,8 +222,8 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
async def detect_once(self, input: Image.Image, settings: Any, src_size, cvss) -> ObjectsDetected:
pass
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: PredictSession) -> ObjectsDetected:
settings = detection_session and detection_session.settings
async def run_detection_videoframe(self, videoFrame: scrypted_sdk.VideoFrame, detection_session: ObjectDetectionSession) -> ObjectsDetected:
settings = detection_session and detection_session.get('settings')
src_size = videoFrame.width, videoFrame.height
w, h = self.get_input_size()
iw, ih = src_size
@@ -293,16 +231,13 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
hs = h / ih
s = max(ws, hs)
if ws == 1 and hs == 1:
def cvss(point, normalize=False):
return point[0], point[1], True
def cvss(point):
return point[0], point[1]
data = await videoFrame.toBuffer({
'format': videoFrame.format or 'rgb',
})
if videoFrame.format == 'rgba':
image = Image.frombuffer('RGBA', (w, h), data).convert('RGB')
else:
image = Image.frombuffer('RGB', (w, h), data)
image = await ensureRGBData(data, (w, h), videoFrame.format)
try:
ret = await self.detect_once(image, settings, src_size, cvss)
return ret
@@ -347,19 +282,15 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
})
)
if videoFrame.format == 'rgba':
first = Image.frombuffer('RGBA', (w, h), firstData).convert('RGB')
else:
first = Image.frombuffer('RGB', (w, h), firstData)
if videoFrame.format == 'rgba':
second = Image.frombuffer('RGBA', (w, h), secondData).convert('RGB')
else:
second = Image.frombuffer('RGB', (w, h), secondData)
first, second = await asyncio.gather(
ensureRGBData(firstData, (w, h), videoFrame.format),
ensureRGBData(secondData, (w, h), videoFrame.format)
)
def cvss1(point, normalize=False):
return point[0] / s, point[1] / s, True
def cvss2(point, normalize=False):
return point[0] / s + ow, point[1] / s + oh, True
def cvss1(point):
return point[0] / s, point[1] / s
def cvss2(point):
return point[0] / s + ow, point[1] / s + oh
ret1 = await self.detect_once(first, settings, src_size, cvss1)
first.close()
@@ -395,242 +326,3 @@ class PredictPlugin(DetectPlugin, scrypted_sdk.BufferConverter, scrypted_sdk.Set
ret = ret1
ret['detections'] = dedupe_detections(ret1['detections'] + ret2['detections'], is_same_detection=is_same_detection_middle)
return ret
async def run_detection_image(self, detection_session: PredictSession, image: Image.Image, settings: Any, src_size, convert_to_src_size: Any = None, multipass_crop: Tuple[float, float, float, float] = None):
(w, h) = self.get_input_size() or image.size
(iw, ih) = image.size
if detection_session and not detection_session.tracker:
t = self.trackers.get(detection_session.id)
if not t:
t = tracker.Sort_OH(scene=np.array([iw, ih]))
trackerCertainty = settings.get('trackerCertainty')
if not isinstance(trackerCertainty, int):
trackerCertainty = .2
t.conf_three_frame_certainty = trackerCertainty * 3
trackerWindow = settings.get('trackerWindow')
if not isinstance(trackerWindow, int):
trackerWindow = 3
t.conf_unmatched_history_size = trackerWindow
self.trackers[detection_session.id] = t
detection_session.tracker = t
# conf_trgt = 0.35
# conf_objt = 0.75
# detection_session.tracker.conf_trgt = conf_trgt
# detection_session.tracker.conf_objt = conf_objt
# this is a single pass or the second pass: detect once and return results.
if multipass_crop:
(l, t, dx, dy) = multipass_crop
# find center
cx = l + dx / 2
cy = t + dy / 2
# fix aspect ratio on box
if dx / w > dy / h:
dy = dx / w * h
else:
dx = dy / h * w
if dx > image.width:
s = image.width / dx
dx = image.width
dy *= s
if dy > image.height:
s = image.height / dy
dy = image.height
dx *= s
# crop size to fit input size
if dx < w:
dx = w
if dy < h:
dy = h
l = cx - dx / 2
t = cy - dy / 2
if l < 0:
l = 0
if t < 0:
t = 0
if l + dx > iw:
l = iw - dx
if t + dy > ih:
t = ih - dy
crop_box = (l, t, l + dx, t + dy)
if dx == w and dy == h:
input = image.crop(crop_box)
else:
input = image.resize((w, h), Image.ANTIALIAS, crop_box)
def cvss(point, normalize=False):
unscaled = ((point[0] / w) * dx + l, (point[1] / h) * dy + t)
converted = convert_to_src_size(unscaled, normalize) if convert_to_src_size else (unscaled[0], unscaled[1], True)
return converted
ret = await self.detect_once(input, settings, src_size, cvss)
input.close()
detection_session.processed = detection_session.processed + 1
return ret, RawImage(image)
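# worked example (hypothetical numbers): input 320x320, image 1920x1080,
# multipass_crop=(100, 100, 200, 100): center=(200, 150); widening dy to
# match the input aspect gives 200x200, clamping to the 320x320 floor gives
# 320x320, and re-centering with t clipped at 0 yields
# crop_box=(40, 0, 360, 320), cropped without resizing since it already
# matches the input size.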
ws = w / iw
hs = h / ih
s = max(ws, hs)
if ws == 1 and hs == 1:
def cvss(point, normalize=False):
converted = convert_to_src_size(point, normalize) if convert_to_src_size else (point[0], point[1], True)
return converted
ret = await self.detect_once(image, settings, src_size, cvss)
if detection_session:
detection_session.processed = detection_session.processed + 1
else:
sw = int(w / s)
sh = int(h / s)
first_crop = (0, 0, sw, sh)
first = image.resize((w, h), Image.ANTIALIAS, first_crop)
ow = iw - sw
oh = ih - sh
second_crop = (ow, oh, ow + sw, oh + sh)
second = image.resize((w, h), Image.ANTIALIAS, second_crop)
def cvss1(point, normalize=False):
unscaled = (point[0] / s, point[1] / s)
converted = convert_to_src_size(unscaled, normalize) if convert_to_src_size else (unscaled[0], unscaled[1], True)
return converted
def cvss2(point, normalize=False):
unscaled = (point[0] / s + ow, point[1] / s + oh)
converted = convert_to_src_size(unscaled, normalize) if convert_to_src_size else (unscaled[0], unscaled[1], True)
return converted
ret1 = await self.detect_once(first, settings, src_size, cvss1)
first.close()
if detection_session:
detection_session.processed = detection_session.processed + 1
ret2 = await self.detect_once(second, settings, src_size, cvss2)
if detection_session:
detection_session.processed = detection_session.processed + 1
second.close()
two_intersect = intersect_rect(Rectangle(*first_crop), Rectangle(*second_crop))
def is_same_detection_middle(d1: ObjectDetectionResult, d2: ObjectDetectionResult):
same, ret = is_same_detection(d1, d2)
if same:
return same, ret
if d1['className'] != d2['className']:
return False, None
r1 = from_bounding_box(d1['boundingBox'])
m1 = intersect_rect(two_intersect, r1)
if not m1:
return False, None
r2 = from_bounding_box(d2['boundingBox'])
m2 = intersect_rect(two_intersect, r2)
if not m2:
return False, None
same, ret = is_same_box(to_bounding_box(m1), to_bounding_box(m2))
if not same:
return False, None
c = to_bounding_box(combine_rect(r1, r2))
return True, c
ret = ret1
ret['detections'] = dedupe_detections(ret1['detections'] + ret2['detections'], is_same_detection=is_same_detection_middle)
if detection_session:
self.track(detection_session, ret)
if not len(ret['detections']):
return ret, RawImage(image)
return ret, RawImage(image)
def track(self, detection_session: PredictSession, ret: ObjectsDetected):
detections = ret['detections']
sort_input = []
for d in ret['detections']:
r: ObjectDetectionResult = d
l, t, w, h = r['boundingBox']
sort_input.append([l, t, l + w, t + h, r['score']])
trackers, unmatched_trckr, unmatched_gts = detection_session.tracker.update(np.array(sort_input), [])
for td in trackers:
x0, y0, x1, y1, trackID = td[0].item(), td[1].item(
), td[2].item(), td[3].item(), td[4].item()
slop = 0
obj: ObjectDetectionResult = None
ta = (x1 - x0) * (y1 - y0)
box = Rectangle(x0, y0, x1, y1)
for d in detections:
if d.get('id'):
continue
ob: ObjectDetectionResult = d
dx0, dy0, dw, dh = ob['boundingBox']
dx1 = dx0 + dw
dy1 = dy0 + dh
da = dw * dh
area = intersect_area(Rectangle(dx0, dy0, dx1, dy1), box)
if not area:
continue
# the intersect area is always smaller than the detection or tracker area,
# so each ratio is at most 1; greater values, i.e. approaching 2, indicate
# a better match.
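# e.g. tracker box 100 px^2, detection 120 px^2, overlap 90 px^2:
# dslop = 90/100 + 90/120 = 1.65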
dslop = area / ta + area / da
if (dslop > slop):
slop = dslop
obj = ob
if obj:
obj['id'] = str(trackID)
# this may happen if the tracker predicts something is still in the scene
# but it was not detected
# else:
# print('unresolved tracker')
# for d in detections:
# if not d.get('id'):
# # this happens if the tracker is not confident in a new detection yet due
# # to low score or has not been found in enough frames
# if d['className'] == 'person':
# print('untracked %s: %s' % (d['className'], d['score']))
async def run_detection_crop(self, detection_session: DetectionSession, sample: RawImage, settings: Any, src_size, convert_to_src_size, bounding_box: Tuple[float, float, float, float]) -> ObjectsDetected:
(ret, _) = await self.run_detection_image(detection_session, sample.image, settings, src_size, convert_to_src_size, bounding_box)
return ret
async def run_detection_gstsample(self, detection_session: PredictSession, gstsample, settings: Any, src_size, convert_to_src_size) -> Tuple[ObjectsDetected, Image.Image]:
caps = gstsample.get_caps()
# can't trust the width value, compute the stride
height = caps.get_structure(0).get_value('height')
width = caps.get_structure(0).get_value('width')
gst_buffer = gstsample.get_buffer()
result, info = gst_buffer.map(Gst.MapFlags.READ)
if not result:
return
try:
image = detection_session.image
detection_session.image = None
if image and (image.width != width or image.height != height):
image.close()
image = None
if image:
image.frombytes(bytes(info.data))
else:
image = Image.frombuffer('RGB', (width, height), bytes(info.data))
finally:
gst_buffer.unmap(info)
try:
return await self.run_detection_image(detection_session, image, settings, src_size, convert_to_src_size)
except:
image.close()
traceback.print_exc()
raise
def create_detection_session(self):
return PredictSession(start_time=time.time())

View File

@@ -1 +0,0 @@
../../../sort-tracker/sort_oh/libs

View File

@@ -1,16 +1,7 @@
--extra-index-url https://google-coral.github.io/py-repo/
# plugin
numpy>=1.16.2
# pillow for anything not intel linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
pycoral~=2.0
PyGObject>=3.30.4; sys_platform != 'win32'
# libav doesn't work on armv7
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
tflite-runtime==2.5.0.post1
# sort_oh
scipy
filterpy
# pillow for anything not intel linux, pillow-simd is available on x64 linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'

View File

@@ -1,5 +1,4 @@
from __future__ import annotations
import threading
from .common import *
from PIL import Image
from pycoral.adapters import detect

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/tensorflow-lite",
"version": "0.1.7",
"version": "0.1.8",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/tensorflow-lite",
"version": "0.1.7",
"version": "0.1.8",
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
}

View File

@@ -41,5 +41,5 @@
"devDependencies": {
"@scrypted/sdk": "file:../../sdk"
},
"version": "0.1.7"
"version": "0.1.8"
}

View File

@@ -1 +0,0 @@
../../tensorflow-lite/src/pipeline

View File

@@ -1,14 +1,8 @@
# plugin
numpy>=1.16.2
# pillow for anything not intel linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'
tensorflow-macos; sys_platform == 'darwin'
tensorflow; sys_platform != 'darwin'
PyGObject>=3.30.4; sys_platform != 'win32'
# not available on armhf
av>=10.0.0; sys_platform != 'linux' or platform_machine == 'x86_64' or platform_machine == 'aarch64'
# sort_oh
scipy
filterpy
numpy>=1.16.2
# pillow for anything not intel linux, pillow-simd is available on x64 linux
Pillow>=5.4.1; sys_platform != 'linux' or platform_machine != 'x86_64'
pillow-simd; sys_platform == 'linux' and platform_machine == 'x86_64'

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/webrtc",
"version": "0.1.37",
"version": "0.1.38",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/webrtc",
"version": "0.1.37",
"version": "0.1.38",
"dependencies": {
"@scrypted/common": "file:../../common",
"@scrypted/sdk": "file:../../sdk",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/webrtc",
"version": "0.1.37",
"version": "0.1.38",
"scripts": {
"scrypted-setup-project": "scrypted-setup-project",
"prescrypted-setup-project": "scrypted-package-json",

View File

@@ -27,7 +27,9 @@
"${workspaceFolder}/**/*.js"
],
"env": {
"SCRYPTED_PYTHON_PATH": "python3.10",
// force usage of system python because brew python is 3.11
// which has no wheels for coreml tools or tflite-runtime
"SCRYPTED_PYTHON_PATH": "/usr/bin/python3",
// "SCRYPTED_SHARED_WORKER": "true",
// "SCRYPTED_DISABLE_AUTHENTICATION": "true",
// "DEBUG": "*",

View File

@@ -1,12 +1,12 @@
{
"name": "@scrypted/server",
"version": "0.7.28",
"version": "0.7.39",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@scrypted/server",
"version": "0.7.28",
"version": "0.7.39",
"license": "ISC",
"dependencies": {
"@mapbox/node-pre-gyp": "^1.0.10",

View File

@@ -1,6 +1,6 @@
{
"name": "@scrypted/server",
"version": "0.7.29",
"version": "0.7.41",
"description": "",
"dependencies": {
"@mapbox/node-pre-gyp": "^1.0.10",

View File

@@ -264,6 +264,14 @@ class PluginRemote:
nativeId, *values, sep=sep, end=end, flush=flush), self.loop)
async def loadZip(self, packageJson, zipData, options: dict=None):
try:
return await self.loadZipWrapped(packageJson, zipData, options)
except:
print('plugin start/fork failed')
traceback.print_exc()
raise
async def loadZipWrapped(self, packageJson, zipData, options: dict=None):
sdk = ScryptedStatic()
clusterId = options['clusterId']
@@ -370,6 +378,18 @@ class PluginRemote:
plugin_volume = os.environ.get('SCRYPTED_PLUGIN_VOLUME')
# it's possible to run 32bit docker on aarch64, which causes pip requirements
# to fail because pip only allows filtering on machine, even when running a different architecture.
# this will cause prebuilt wheel installation to fail.
if platform.machine() == 'aarch64' and platform.architecture()[0] == '32bit':
print('=============================================')
print('Python machine vs architecture mismatch detected. Plugin installation may fail.')
print('If Scrypted is running in docker, the docker version may be 32bit while the host kernel is 64bit.')
print('This may be resolved by reinstalling a 64bit docker.')
print('The docker architecture can be checked with the command: "file $(which docker)"')
print('The host architecture can be checked with: "uname -m"')
print('=============================================')
python_version = 'python%s' % str(
sys.version_info[0])+"."+str(sys.version_info[1])
print('python version:', python_version)
@@ -377,11 +397,13 @@ class PluginRemote:
python_versioned_directory = '%s-%s-%s' % (python_version, platform.system(), platform.machine())
SCRYPTED_BASE_VERSION = os.environ.get('SCRYPTED_BASE_VERSION')
if SCRYPTED_BASE_VERSION:
python_versioned_directory += SCRYPTED_BASE_VERSION
python_versioned_directory += '-' + SCRYPTED_BASE_VERSION
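# e.g. python3.10-Linux-x86_64, with '-<SCRYPTED_BASE_VERSION>' appended
# when that env variable is set.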
python_prefix = os.path.join(
plugin_volume, python_versioned_directory)
print('python prefix: %s' % python_prefix)
if not os.path.exists(python_prefix):
os.makedirs(python_prefix)
@@ -401,7 +423,18 @@ class PluginRemote:
pass
if need_pip:
shutil.rmtree(python_prefix)
try:
for de in os.listdir(plugin_volume):
if de.startswith('linux') or de.startswith('darwin') or de.startswith('win32') or de.startswith('python') or de.startswith('node'):
filePath = os.path.join(plugin_volume, de)
print('Removing old dependencies: %s' % filePath)
try:
shutil.rmtree(filePath)
except:
pass
except:
pass
os.makedirs(python_prefix)
print('requirements.txt (outdated)')
@@ -519,20 +552,10 @@ class PluginRemote:
self.deviceManager, self.mediaManager)
if not forkMain:
try:
from main import create_scrypted_plugin # type: ignore
except:
print('plugin failed to start')
traceback.print_exc()
raise
from main import create_scrypted_plugin # type: ignore
return await rpc.maybe_await(create_scrypted_plugin())
try:
from main import fork # type: ignore
except:
print('fork failed to start')
traceback.print_exc()
raise
from main import fork # type: ignore
forked = await rpc.maybe_await(fork())
if type(forked) == dict:
forked[rpc.RpcPeer.PROPERTY_JSON_COPY_SERIALIZE_CHILDREN] = True

View File

@@ -1,6 +1,12 @@
import { once } from 'events';
import net from 'net';
export class ListenZeroSingleClientTimeoutError extends Error {
constructor() {
super('timeout waiting for client')
}
}
export async function listenZero(server: net.Server, hostname?: string) {
server.listen(0, hostname);
await once(server, 'listening');
@@ -14,7 +20,7 @@ export async function listenZeroSingleClient(hostname?: string) {
const clientPromise = new Promise<net.Socket>((resolve, reject) => {
const timeout = setTimeout(() => {
server.close();
reject(new Error('timeout waiting for client'));
reject(new ListenZeroSingleClientTimeoutError());
}, 30000)
server.on('connection', client => {
server.close();

View File

@@ -1,3 +1,4 @@
import os from 'os';
import { Device, EngineIOHandler } from '@scrypted/types';
import AdmZip from 'adm-zip';
import crypto from 'crypto';
@@ -310,7 +311,9 @@ export class PluginHost {
this.worker.stdout.on('data', data => console.log(data.toString()));
this.worker.stderr.on('data', data => console.error(data.toString()));
const consoleHeader = `server version: ${serverVersion}\nplugin version: ${this.pluginId} ${this.packageJson.version}\n`;
let consoleHeader = `${os.platform()} ${os.arch()} ${os.version()}\nserver version: ${serverVersion}\nplugin version: ${this.pluginId} ${this.packageJson.version}\n`;
if (process.env.SCRYPTED_DOCKER_FLAVOR)
consoleHeader += `${process.env.SCRYPTED_DOCKER_FLAVOR}\n`;
this.consoleServer = createConsoleServer(this.worker.stdout, this.worker.stderr, consoleHeader);
const disconnect = () => {

View File

@@ -15,7 +15,7 @@ export function getPluginNodePath(name: string) {
let nodeVersionedDirectory = `node${nodeMajorVersion}-${process.platform}-${process.arch}`;
const scryptedBase = process.env.SCRYPTED_BASE_VERSION;
if (scryptedBase)
nodeVersionedDirectory += '-' + nodeVersionedDirectory;
nodeVersionedDirectory += '-' + scryptedBase;
const nodePrefix = path.join(pluginVolume, nodeVersionedDirectory);
return nodePrefix;
}

View File

@@ -176,6 +176,7 @@ export function startPluginRemote(mainFilename: string, pluginId: string, peerSe
const pluginConsole = getPluginConsole?.();
params.console = pluginConsole;
const pnp = getPluginNodePath(pluginId);
pluginConsole?.log('node modules', pnp);
params.require = (name: string) => {
if (name === 'fakefs' || (name === 'fs' && !packageJson.scrypted.realfs)) {
return volume;

View File

@@ -645,7 +645,13 @@ export function attachPluginRemote(peer: RpcPeer, options?: PluginRemoteAttachOp
params.pluginRuntimeAPI = ret;
return options.onLoadZip(ret, params, packageJson, zipData, zipOptions);
try {
return await options.onLoadZip(ret, params, packageJson, zipData, zipOptions);
}
catch (e) {
console.error('plugin start/fork failed', e)
throw e;
}
},
}

View File

@@ -39,7 +39,10 @@ export class PythonRuntimeWorker extends ChildProcessWorker {
'/usr/local/lib/gstreamer-1.0',
];
for (const gstPath of gstPaths) {
if (fs.existsSync(path.join(gstPath, 'libgstx264.dylib'))) {
// search for common plugins.
if (fs.existsSync(path.join(gstPath, 'libgstx264.dylib'))
|| fs.existsSync(path.join(gstPath, 'libgstlibav.dylib'))
|| fs.existsSync(path.join(gstPath, 'libgstvideotestsrc.dylib'))) {
gstEnv['GST_PLUGIN_PATH'] = gstPath;
break;
}

View File

@@ -446,25 +446,24 @@ async function start(mainFilename: string, options?: {
let hasLogin = await db.getCount(ScryptedUser) > 0;
if (process.env.SCRYPTED_ADMIN_USERNAME && process.env.SCRYPTED_ADMIN_TOKEN) {
let user = await db.tryGet(ScryptedUser, process.env.SCRYPTED_ADMIN_USERNAME);
if (!user) {
user = new ScryptedUser();
user._id = process.env.SCRYPTED_ADMIN_USERNAME;
setScryptedUserPassword(user, crypto.randomBytes(8).toString('hex'), Date.now());
user.token = crypto.randomBytes(16).toString('hex');
await db.upsert(user);
hasLogin = true;
}
}
app.options('/login', (req, res) => {
res.setHeader('Access-Control-Allow-Methods', 'GET, POST, OPTIONS');
res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization, Content-Length, X-Requested-With');
res.send(200);
});
const resetLogin = path.join(getScryptedVolume(), 'reset-login');
async function checkResetLogin() {
try {
if (fs.existsSync(resetLogin)) {
fs.rmSync(resetLogin);
await db.removeAll(ScryptedUser);
hasLogin = false;
}
}
catch (e) {
}
}
app.post('/login', async (req, res) => {
const { username, password, change_password, maxAge: maxAgeRequested } = req.body;
const timestamp = Date.now();
@@ -550,6 +549,19 @@ async function start(mainFilename: string, options?: {
});
});
const resetLogin = path.join(getScryptedVolume(), 'reset-login');
async function checkResetLogin() {
try {
if (fs.existsSync(resetLogin)) {
fs.rmSync(resetLogin);
await db.removeAll(ScryptedUser);
hasLogin = false;
}
}
catch (e) {
}
}
app.get('/login', async (req, res) => {
await checkResetLogin();
@@ -558,7 +570,11 @@ async function start(mainFilename: string, options?: {
// env/header based admin login
if (res.locals.username && res.locals.username === process.env.SCRYPTED_ADMIN_USERNAME) {
const userToken = new UserToken(res.locals.username, undefined, Date.now());
res.send({
...createTokens(userToken),
expiration: ONE_DAY_MILLISECONDS,
username: res.locals.username,
token: process.env.SCRYPTED_ADMIN_TOKEN,
addresses,